diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
new file mode 100644
index 00000000..06bf30a3
--- /dev/null
+++ b/.github/workflows/go.yml
@@ -0,0 +1,29 @@
+# This workflow will build a golang project
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
+
+name: Go
+
+on:
+ push:
+ branches: [ "dev", "branch-2.0", "branch-2.1", "branch-3.0" ]
+ pull_request:
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Set up Go
+ uses: actions/setup-go@v4
+ with:
+ go-version: '1.20'
+
+ - name: Format
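+        # Fail the job if gofmt -s would reformat any file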
+ run: if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then exit 1; fi
+
+ - name: Build
+ run: make
+
+ - name: Test
+ run: make test
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
new file mode 100644
index 00000000..fda09ab1
--- /dev/null
+++ b/.github/workflows/golangci-lint.yml
@@ -0,0 +1,29 @@
+name: golangci-lint
+on:
+ push:
+ branches:
+ - main
+ - dev
+ - branch-3.0
+ - branch-2.1
+ - branch-2.0
+ pull_request:
+
+permissions:
+ contents: read
+ # Optional: allow read access to pull request. Use with `only-new-issues` option.
+ # pull-requests: read
+
+jobs:
+ golangci:
+ name: lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-go@v5
+ with:
+ go-version: '1.20'
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v6
+ with:
+ version: v1.60
diff --git a/.gitignore b/.gitignore
index 96cea04b..0773631c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,4 +4,4 @@
bin
output
ccr.db
-backup
+tarball
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 358b6657..774d4191 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,16 +1,169 @@
# 更新日志
-## v 0.5
+
+## 3.0.4/2.1.8
+
+注意:从这个版本开始 doris 和 ccr-syncer 的 2.0 版本将不再更新,需要使用 ccr-syncer 的用户请先升级到 2.1 及以上版本。
+
+### Fix
+
+- 修复 table name 中带 `-` 无法同步的问题 (selectdb/ccr-syncer#168)
+- 修复部分同步下可能同步多次增量数据的问题 (selectdb/ccr-syncer#186)
+- 修复 create 又立即 drop 的情况下无法找到 table 的问题 (selectdb/ccr-syncer#188)
+- 跳过不支持的 table 类型,比如 ES TABLE
+- 避免在同步快照、binlog 期间对上游 name 产生依赖 (selectdb/ccr-syncer#205, selectdb/ccr-syncer#239)
+- 修复全量同步期间 view 的别名问题 (selectdb/ccr-syncer#207)
+- 修复 add partition with keyword name 的问题 (selectdb/ccr-syncer#212)
+- 跳过 drop tmp partition (selectdb/ccr-syncer#214)
+- 修复快照过期的问题,过期后会重做 (selectdb/ccr-syncer#229)
+- 修复 rename 导致的上下游 index name 无法匹配的问题 (selectdb/ccr-syncer#235)
+- 修复并行创建 table/backup 时 table 丢失的问题 (selectdb/ccr-syncer#237)
+- 修复 partial snapshot 期间,上游 table/partition 已经被删除/重命名/替换的问题 (selectdb/ccr-syncer#240, selectdb/ccr-syncer#241, selectdb/ccr-syncer#249, selectdb/ccr-syncer#255)
+- 检查 database connection 错误 (selectdb/ccr-syncer#247)
+- 过滤已经被删除的 table (selectdb/ccr-syncer#248)
+- 修复 create table 时下游 table 已经存在的问题 (selectdb/ccr-syncer#161)
+
+### Feature
+
+- 支持 atomic restore,全量同步期间下游仍然可读 (selectdb/ccr-syncer#166)
+- 支持处理包装在 barrier log 中的其他 binlog (主要用于在 2.0/2.1 上支持新增的 binlog 类型)(selectdb/ccr-syncer#208)
+- 支持 rename table (2.1) (selectdb/ccr-syncer#209)
+- 跳过 modify partition binlog (selectdb/ccr-syncer#213)
+- 支持 modify comment binlog (selectdb/ccr-syncer#140)
+- 支持 replace table binlog (selectdb/ccr-syncer#245)
+- 支持 drop view binlog (selectdb/ccr-syncer#138)
+- 支持 modify view def binlog (selectdb/ccr-syncer#184)
+- 支持 inverted index 相关 binlog (selectdb/ccr-syncer#252)
+- 支持 table sync 下的 txn insert (WIP) (selectdb/ccr-syncer#234, selectdb/ccr-syncer#259)
+- 支持 rename partition/rollup binlogs (selectdb/ccr-syncer#268)
+- 支持 add/drop rollup binlogs (selectdb/ccr-syncer#269)
+- 支持 modify view/comment in 2.1 (selectdb/ccr-syncer#270, selectdb/ccr-syncer#273)
+- 支持 table sync 下的 replace table (selectdb/ccr-syncer#279)
+
+### Improve
+
+- 支持同步 rename column,需要 doris xxxx (selectdb/ccr-syncer#139)
+- 支持在全量同步过程中,遇到 table signature 不匹配时,使用 alias 替代 drop (selectdb/ccr-syncer#179)
+- 增加 monitor,在日志中 dump 内存使用率 (selectdb/ccr-syncer#181)
+- 过滤 schema change 删除的 indexes,避免全量同步 (selectdb/ccr-syncer#185)
+- 过滤 schema change 创建的 shadow indexes 的更新,避免全量同步 (selectdb/ccr-syncer#187)
+- 增加 `mysql_max_allowed_packet` 参数,控制 mysql sdk 允许发送的 packet 大小 (selectdb/ccr-syncer#196)
+- 限制一个 JOB 中单个 BE 的 ingest 并发数,减少对 BE 的连接数和文件描述符消耗 (selectdb/ccr-syncer#195)
+- 避免在获取 job status 时等待锁 (selectdb/ccr-syncer#198)
+- 避免 backup/restore 任务阻塞查询 ccr job progress (selectdb/ccr-syncer#201, selectdb/ccr-syncer#206)
+- 避免将 snapshot job info 和 meta (这两个数据可能非常大)持久化到 mysql 中 (selectdb/ccr-syncer#204)
+- 上游 db 中没有 table 时,打印 info 而不是 error (selectdb/ccr-syncer#211)
+- 在 ccr syncer 重启后,复用由当前 job 发起的 backup/restore job (selectdb/ccr-syncer#218, selectdb/ccr-syncer#224, selectdb/ccr-syncer#226)
+- 支持读取压缩后的快照/恢复快照时压缩,避免碰到 thrift max message size 限制 (selectdb/ccr-syncer#223)
+- API job_progress 避免返回 persist data (selectdb/ccr-syncer#271)
+
+## 2.0.15/2.1.6
+
+### Fix
+
+- 修复 `REPLACE_IF_NOT_NULL` 语句的默认值语法不兼容问题 (selectdb/ccr-syncer#180)
+- 修复 table sync 下 partial snapshot 没有更新 dest table id 的问题 (selectdb/ccr-syncer#178)
+- **修复 table sync with alias 时,lightning schema change 找不到 table 的问题** (selectdb/ccr-syncer#176)
+- 修复 db sync 下 partial snapshot table 为空的问题 (selectdb/ccr-syncer#173)
+- 修复 create table 时下游 view 已经存在的问题(先删除 view),feature gate: `feature_create_view_drop_exists` (selectdb/ccr-syncer#170,selectdb/ccr-syncer#171)
+- 修复 table not found 时没有 rollback binlog 的问题
+- **修复下游删表后重做 snapshot 时 table mapping 过期的问题 (selectdb/ccr-syncer#162,selectdb/ccr-syncer#163,selectdb/ccr-syncer#164)**
+- 修复 full sync 期间 view already exists 的问题,如果 signature 不匹配会先删除 (selectdb/ccr-syncer#152)
+- 修复 2.0 中 get view 逻辑,兼容 default_cluster 语法 (selectdb/ccr-syncer#149)
+- 修复 job state 变化时仍然更新了 job progress 的问题,对之前的逻辑无影响,主要用于支持 partial sync (selectdb/ccr-syncer#124)
+- 修复 get_lag 接口中不含 lag 的问题 (selectdb/ccr-syncer#126)
+- 修复下游 restore 时未清理 orphan tables/partitions 的问题 (selectdb/ccr-syncer#128)
+ - 备注: 暂时禁用,因为 doris 侧发现了 bug (selectdb/ccr-syncer#153,selectdb/ccr-syncer#161)
+- **修复下游删表后重做 snapshot 时 dest meta cache 过期的问题 (selectdb/ccr-syncer#132)**
+
+### Feature
+
+- 增加 `/force_fullsync` 用于强制触发 fullsync (selectdb/ccr-syncer#167)
+- 增加 `/features` 接口,用于列出当前有哪些 feature 以及是否打开 (selectdb/ccr-syncer#175)
+- 支持同步 drop view(drop table 失败后使用 drop view 重试)(selectdb/ccr-syncer#169)
+- 支持同步 rename 操作 (selectdb/ccr-syncer#147)
+- schema change 使用 partial sync 而不是 fullsync (selectdb/ccr-syncer#151)
+- partial sync 使用 rename 而不是直接修改 table,因此表的读写在同步过程中不受影响 (selectdb/ccr-syncer#148)
+- 支持 partial sync,减少需要同步的数据量 (selectdb/ccr-syncer#125)
+- 添加参数 `allowTableExists`,允许在下游 table 存在时,仍然创建 ccr job(如果 schema 不一致,会自动删表重建)(selectdb/ccr-syncer#136)
+
+### Improve
+
+- 日志输出 milliseconds (selectdb/ccr-syncer#182)
+- 如果下游表的 schema 不一致,则将表移动到 RecycleBin 中(之前是强制删除)(selectdb/ccr-syncer#137)
+
+## 2.0.14/2.1.5
+
+### Fix
+
+- 过滤已经删除的 partitions,避免 full sync,需要 doris 2.0.14/2.1.5 (selectdb/ccr-syncer#117)
+- 过滤已经删除的 tables,避免 full sync (selectdb/ccr-syncer#123)
+- 兼容 doris 3.0 alternative json name,doris 3.0 必须使用该版本的 CCR syncer (selectdb/ccr-syncer#121)
+- 修复 list jobs 接口在高可用环境下不可用的问题 (selectdb/ccr-syncer#120)
+
+## 2.0.11
+
+对应 doris 2.0.11。
+
+### Feature
+
+- 支持以 postgresql 作为 ccr-syncer 的元数据库 (selectdb/ccr-syncer#77)
+- 支持 insert overwrite 相关操作 (selectdb/ccr-syncer#97,selectdb/ccr-syncer#99)
+
+### Fix
+
+- 修复 drop partition 后因找不到 partition id 而无法继续同步的问题 (selectdb/ccr-syncer#82)
+- 修复高可用模式下接口无法 redirect 的问题 (selectdb/ccr-syncer#81)
+- 修复 binlog 可能因同步失败而丢失的问题 (selectdb/ccr-syncer#86,selectdb/ccr-syncer#91)
+- 修改 connect 和 rpc 超时时间默认值,connect 默认 10s,rpc 默认 30s (selectdb/ccr-syncer#94,selectdb/ccr-syncer#95)
+- 修复使用 view 和 materialized view 造成的空指针问题 (selectdb/ccr-syncer#100)
+- 修复 add partition sql 错误的问题 (selectdb/ccr-syncer#99)
+
+
+## 2.1.3/2.0.3.10
+
+### Fix
+
+- 修复因与上下游 FE 网络中断而触发 full sync 的问题
+- 修复若干 keywords 没有 escape 的问题
+
+### Feature
+
+- 增加 `/job_progress` 接口用于获取 JOB 进度
+- 增加 `/job_details` 接口用于获取 JOB 信息
+- 保留 job 状态变更的各个时间点,并在 `/job_progress` 接口中展示
+
+## 2.0.3.9
+
+配合 doris 2.0.9 版本
+
+### Feature
+
+- 添加选项以启动 pprof server
+- 允许配置 rpc 和 connection 超时
+
+### Fix
+
+- restore 每次重试时使用不同的 label 名
+- update table 失败时(目标表不存在)会触发快照同步
+- 修复同步 sql 中包含关键字的问题
+- 如果恢复时碰到表 schema 发生变化,会先删表再重试恢复
+
+## 0.5
### 支持高可用
- 现在可以部署多个Syncer节点来保证CCR功能的高可用。
- db是Syncer集群划分的依据,同一个集群下的Syncer共用一个db。
- Syncer集群采用对称设计,每个Syncer都会相对独立的执行被分配到的job。在某个Syncer节点down掉后,它的jobs会依据负载均衡算法被分给其他Syncer节点。
-## v 0.4
+## 0.4
* 增加 enable_db_binlog.sh 方便用户对整库开启binlog
-## v 0.3
+## 0.3
### LOG
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..e0741144
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,20 @@
+This free license agreement is made between SelectDB Inc. (hereinafter referred to as "SelectDB", "we", "our" or "us") and the users ("user", "your" or "you") of the SelectDB products. SelectDB products refer to the software and services provided by SelectDB, including any updates, error fixes, and documentation. You acknowledge that you have fully read, understood and accepted this agreement in its entirety before you begin a trial or purchase of SelectDB products or services. You agree that by clicking the "agree" box or similar or using our product and services, you are agreeing to enter to this agreement, which is a legally binding contract between you and SelectDB. If you do not agree with any provision of this agreement, you must not purchase or use any of our services.
+SelectDB reserves the right to change our products and services in accordance with applicable laws and our corporate policies without prior notice. We will post the changes on SelectDB.io. You agree that by continuing to use our services after the announcement of any changes to this agreement, you acknowledge that you have fully read, understood and accepted the modified products and services and will use our products and services in accordance with the modified agreement. If you disagree with any changes, you should no longer use our products and services.
+License Rights and Limitations
+SelectDB grants you a free, non-exclusive, non-transferable, limited license to use the SelectDB products.
+- You have the right to install and use the SelectDB product or service on multiple computers, and run it for your personal use or internal business operations, subject to the terms of this agreement.
+- You may redistribute the unmodified software and product documentation in accordance with the terms of this agreement, provided that you do not charge any fees related to such distribution or use of the software.
+The effectiveness of your license is subject to the following conditions:
+- You shall not remove any proprietary notices or markings of SelectDB or the licensor from the software or documentation.
+- You shall not modify, reverse engineer, decompile, or attempt to extract the source code of the software.
+- You shall not use the software for illegal purposes or violate any applicable laws or regulations.
+Intellectual Property Rights
+- All intellectual property rights of SelectDB, including but not limited to copyrights, patents, and trademarks, are owned by the licensor.
+- This agreement does not grant the user any intellectual property rights.
+Disclaimer
+- The software is provided "as is" without any warranties, representations, conditions, or guarantees of any kind.
+- The licensor does not provide any warranties regarding the suitability, merchantability, accuracy, reliability, or any other aspect of the software.
+- To the maximum extent permitted by applicable law, the licensor shall not be liable for any direct, indirect, incidental, special, or consequential damages arising from the use of the software.
+Miscellaneous
+- This agreement constitutes the entire agreement between the licensor and the user regarding the use of the software and supersedes any prior oral or written agreements.
+- This agreement shall be governed by the laws of Singapore in terms of interpretation, validity, and performance.
diff --git a/Makefile b/Makefile
index c9afe40c..5bc8812d 100644
--- a/Makefile
+++ b/Makefile
@@ -9,6 +9,31 @@ tag := $(shell git describe --abbrev=0 --always --dirty --tags)
sha := $(shell git rev-parse --short HEAD)
git_tag_sha := $(tag):$(sha)
+ifeq ($(shell uname -i),x86_64)
+  # Normalize uname output to the release platform names (x64 / arm64)
+ platform := x64
+else
+ platform := arm64
+endif
+tarball_suffix := $(tag)-$(platform)
+
+LDFLAGS="-X 'github.com/selectdb/ccr_syncer/pkg/version.GitTagSha=$(git_tag_sha)'"
+GOFLAGS=
+
+GOFORMAT := gofmt -l -d -w
+
+# COVERAGE=ON make
+ifeq ($(COVERAGE),ON)
+ GOFLAGS += -cover
+endif
+
+.PHONY: flag_coverage
+## COVERAGE=ON : Set coverage flag
+
+.PHONY: default
+## default: Build ccr_syncer
+default: ccr_syncer
+
.PHONY: build
## build : Build binary
build: ccr_syncer get_binlog ingest_binlog get_meta snapshot_op get_master_token spec_checker rows_parse
@@ -27,12 +52,12 @@ lint:
.PHONY: fmt
## fmt : Format all code
fmt:
- $(V)go fmt ./...
+ $(V)$(GOFORMAT) .
.PHONY: test
## test : Run test
test:
- $(V)go test $(shell go list ./... | grep -v github.com/selectdb/ccr_syncer/cmd | grep -v github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/) | grep -F -v '[no test files]'
+ $(V)go test $(shell go list ./... | grep -v github.com/selectdb/ccr_syncer/cmd | grep -v github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/)
.PHONY: help
## help : Print help message
@@ -42,11 +67,24 @@ help: Makefile
# --------------- ------------------ ---------------
# --------------- User Defined Tasks ---------------
-.PHONY: cmd/ccr_syncer
+
+.PHONY: cloc
+## cloc : Count lines of code
+cloc:
+ $(V)tokei -C . -e pkg/rpc/kitex_gen -e pkg/rpc/thrift
+
+.PHONY: gen_mock
+## gen_mock : Generate mock
+gen_mock:
+ $(V)mockgen -source=pkg/rpc/fe.go -destination=pkg/ccr/fe_mock.go -package=ccr
+ $(V)mockgen -source=pkg/ccr/metaer.go -destination=pkg/ccr/metaer_mock.go -package=ccr
+ $(V)mockgen -source=pkg/ccr/metaer_factory.go -destination=pkg/ccr/metaer_factory_mock.go -package=ccr
+ $(V)mockgen -source=pkg/rpc/rpc_factory.go -destination=pkg/ccr/rpc_factory_mock.go -package=ccr
+
.PHONY: ccr_syncer
## ccr_syncer : Build ccr_syncer binary
ccr_syncer: bin
- $(V)go build -ldflags "-X github.com/selectdb/ccr_syncer/pkg/version.GitTagSha=$(git_tag_sha)" -o bin/ccr_syncer ./cmd/ccr_syncer
+ $(V)go build ${GOFLAGS} -ldflags ${LDFLAGS} -o bin/ccr_syncer ./cmd/ccr_syncer
.PHONY: get_binlog
## get_binlog : Build get_binlog binary
@@ -59,9 +97,9 @@ run_get_binlog: get_binlog
.PHONY: sync_thrift
## sync_thrift : Sync thrift
-# TODO(Drogon): Add build thrift
sync_thrift:
- $(V)rsync -avc $(THRIFT_DIR)/ rpc/thrift/
+ $(V)rsync -avc $(THRIFT_DIR)/ pkg/rpc/thrift/
+ $(V)$(MAKE) -C pkg/rpc/ gen_thrift
.PHONY: ingest_binlog
## ingest_binlog : Build ingest_binlog binary
@@ -98,7 +136,29 @@ get_lag: bin
rows_parse: bin
$(V)go build -o bin/rows_parse ./cmd/rows_parse
+.PHONY: thrift_get_meta
+## thrift_get_meta : Build thrift_get_meta binary
+thrift_get_meta: bin
+ $(V)go build -o bin/thrift_get_meta ./cmd/thrift_get_meta
+
+.PHONY: metrics
+## metrics : Build metrics binary
+metrics: bin
+ $(V)go build -o bin/metrics ./cmd/metrics
+
.PHONY: todos
## todos : Print all todos
todos:
- $(V)grep -rnw . -e "TODO" | grep -v '^./rpc/thrift' | grep -v '^./.git'
\ No newline at end of file
+ $(V)grep -rnw . -e "TODO" | grep -v '^./pkg/rpc/thrift' | grep -v '^./.git'
+
+.PHONY: tarball
+## tarball : Archive files and release ccr-syncer-$(tag)-$(platform).tar.xz
+tarball: default
+ $(V)mkdir -p tarball/ccr-syncer-$(tarball_suffix)/{bin,db,doc,log}
+ $(V)cp CHANGELOG.md README.md LICENSE tarball/ccr-syncer-$(tarball_suffix)/
+ $(V)cp bin/ccr_syncer tarball/ccr-syncer-$(tarball_suffix)/bin/
+ $(V)cp shell/{enable_db_binlog.sh,start_syncer.sh,stop_syncer.sh} tarball/ccr-syncer-$(tarball_suffix)/bin/
+ $(V)cp -r doc/* tarball/ccr-syncer-$(tarball_suffix)/doc/
+ $(V)cd tarball/ && tar cfJ ccr-syncer-$(tarball_suffix).tar.xz ccr-syncer-$(tarball_suffix)
+ $(V)echo archive: tarball/ccr-syncer-$(tarball_suffix).tar.xz
+
diff --git a/build.sh b/build.sh
old mode 100644
new mode 100755
diff --git a/cmd/ccr_syncer/ccr_syncer.go b/cmd/ccr_syncer/ccr_syncer.go
index 2e08a097..64f997e7 100644
--- a/cmd/ccr_syncer/ccr_syncer.go
+++ b/cmd/ccr_syncer/ccr_syncer.go
@@ -3,6 +3,8 @@ package main
import (
"flag"
"fmt"
+ "net/http"
+ _ "net/http/pprof"
"os"
"sync"
"syscall"
@@ -17,6 +19,8 @@ import (
"github.com/selectdb/ccr_syncer/pkg/version"
"github.com/selectdb/ccr_syncer/pkg/xerror"
+ "github.com/hashicorp/go-metrics"
+ "github.com/hashicorp/go-metrics/prometheus"
log "github.com/sirupsen/logrus"
)
@@ -29,6 +33,8 @@ type Syncer struct {
Db_port int
Db_user string
Db_password string
+ Pprof bool
+	Pprof_port int
}
var (
@@ -49,6 +55,8 @@ func init() {
flag.StringVar(&syncer.Host, "host", "127.0.0.1", "syncer host")
flag.IntVar(&syncer.Port, "port", 9190, "syncer port")
+	flag.IntVar(&syncer.Pprof_port, "pprof_port", 6060, "pprof port used for memory analysis")
+ flag.BoolVar(&syncer.Pprof, "pprof", false, "use pprof or not")
flag.Parse()
utils.InitLog()
@@ -74,6 +82,8 @@ func main() {
db, err = storage.NewSQLiteDB(dbPath)
case "mysql":
db, err = storage.NewMysqlDB(syncer.Db_host, syncer.Db_port, syncer.Db_user, syncer.Db_password)
+ case "postgresql":
+ db, err = storage.NewPostgresqlDB(syncer.Db_host, syncer.Db_port, syncer.Db_user, syncer.Db_password)
default:
err = xerror.Wrap(err, xerror.Normal, "new meta db failed.")
}
@@ -82,7 +92,7 @@ func main() {
}
// Step 2: init factory
- factory := ccr.NewFactory(rpc.NewRpcFactory(), ccr.NewMetaFactory(), base.NewSpecerFactory())
+ factory := ccr.NewFactory(rpc.NewRpcFactory(), ccr.NewMetaFactory(), base.NewSpecerFactory(), ccr.DefaultThriftMetaFactory)
// Step 3: create job manager && http service && checker
hostInfo := fmt.Sprintf("%s:%d", syncer.Host, syncer.Port)
@@ -116,7 +126,22 @@ func main() {
checker.Start()
}()
- // Step 6: start signal mux
+ // Step 7: init metrics
+ sink, err := prometheus.NewPrometheusSink()
+ if err != nil {
+ log.Fatalf("new prometheus sink failed: %+v", err)
+ }
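+	// NewGlobal makes the prometheus sink the process-wide go-metrics sink; cmd/metrics/metrics_demo.go shows the same setup as a standalone demo.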
+ metrics.NewGlobal(metrics.DefaultConfig("ccr-metrics"), sink)
+
+ // Step 8: start monitor
+ monitor := NewMonitor(jobManager)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ monitor.Start()
+ }()
+
+ // Step 9: start signal mux
// use closure to capture httpService, checker, jobManager
signalHandler := func(signal os.Signal) bool {
switch signal {
@@ -126,6 +151,7 @@ func main() {
httpService.Stop()
checker.Stop()
jobManager.Stop()
+ monitor.Stop()
log.Info("all service stop")
return true
case syscall.SIGHUP:
@@ -143,6 +169,18 @@ func main() {
signalMux.Serve()
}()
- // Step 6: wait for all task done
+ // Step 10: start pprof
+	if syncer.Pprof {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+			pprof_info := fmt.Sprintf("%s:%d", syncer.Host, syncer.Pprof_port)
+			if err := http.ListenAndServe(pprof_info, nil); err != nil {
+				log.Infof("start pprof failed on: %s, error: %+v", pprof_info, err)
+ }
+ }()
+ }
+
+ // Step 11: wait for all task done
wg.Wait()
}
diff --git a/cmd/ccr_syncer/monitor.go b/cmd/ccr_syncer/monitor.go
new file mode 100644
index 00000000..cf63ec8d
--- /dev/null
+++ b/cmd/ccr_syncer/monitor.go
@@ -0,0 +1,92 @@
+package main
+
+import (
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/selectdb/ccr_syncer/pkg/ccr"
+ log "github.com/sirupsen/logrus"
+)
+
+const (
+ MONITOR_DURATION = time.Second * 60
+)
+
+type Monitor struct {
+ jobManager *ccr.JobManager
+ stop chan struct{}
+}
+
+func NewMonitor(jm *ccr.JobManager) *Monitor {
+ return &Monitor{
+ jobManager: jm,
+ stop: make(chan struct{}),
+ }
+}
+
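+// dump logs the current goroutine count, memory statistics and per-job sync state counters.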
+func (m *Monitor) dump() {
+ log.Infof("[GOROUTINE] Total = %v", runtime.NumGoroutine())
+
+ mb := func(b uint64) uint64 {
+ return b / 1024 / 1024
+ }
+
+ // see: https://golang.org/pkg/runtime/#MemStats
+ var stats runtime.MemStats
+ runtime.ReadMemStats(&stats)
+ liveObjects := stats.Mallocs - stats.Frees
+ log.Infof("[MEMORY STATS] Alloc = %v MiB, TotalAlloc = %v MiB, Sys = %v MiB, NumGC = %v, LiveObjects = %v",
+ mb(stats.Alloc), mb(stats.TotalAlloc), mb(stats.Sys), stats.NumGC, liveObjects)
+
+ jobs := m.jobManager.ListJobs()
+ numJobs := len(jobs)
+ numRunning := 0
+ numFullSync := 0
+ numIncremental := 0
+ numPartialSync := 0
+ numTableSync := 0
+ numDbSync := 0
+ for _, job := range jobs {
+ if strings.HasPrefix(job.ProgressState, "DB") {
+ numDbSync += 1
+ } else {
+ numTableSync += 1
+ }
+ if job.State == "running" {
+ numRunning += 1
+ if strings.Contains(job.ProgressState, "FullSync") {
+ numFullSync += 1
+ } else if strings.Contains(job.ProgressState, "PartialSync") {
+ numPartialSync += 1
+ } else if strings.Contains(job.ProgressState, "IncrementalSync") {
+ numIncremental += 1
+ }
+ }
+ }
+
+ log.Infof("[JOB STATS] Total = %v, Running = %v, DBSync = %v, TableSync = %v",
+ numJobs, numRunning, numDbSync, numTableSync)
+ log.Infof("[JOB STATUS] FullSync = %v, PartialSync = %v, IncrementalSync = %v",
+ numFullSync, numPartialSync, numIncremental)
+}
+
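+// Start dumps runtime and job statistics every MONITOR_DURATION until Stop is called.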
+func (m *Monitor) Start() {
+ ticker := time.NewTicker(MONITOR_DURATION)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-m.stop:
+ log.Info("monitor stopped")
+ return
+ case <-ticker.C:
+ m.dump()
+ }
+ }
+}
+
+func (m *Monitor) Stop() {
+ log.Info("monitor stopping")
+ close(m.stop)
+}
diff --git a/cmd/ingest_binlog/ingest_binlog.go b/cmd/ingest_binlog/ingest_binlog.go
index 486bb38c..933e579e 100644
--- a/cmd/ingest_binlog/ingest_binlog.go
+++ b/cmd/ingest_binlog/ingest_binlog.go
@@ -108,12 +108,11 @@ func test_commit(t *base.Spec) {
func test_ingest_be() {
backend := base.Backend{
- Id: 10028,
- Host: "127.0.0.1",
- HeartbeatPort: 9050,
- BePort: 9060,
- HttpPort: 8040,
- BrpcPort: 8060,
+ Id: 10028,
+ Host: "127.0.0.1",
+ BePort: 9060,
+ HttpPort: 8040,
+ BrpcPort: 8060,
}
rpcFactory := rpc.NewRpcFactory()
rpc, err := rpcFactory.NewBeRpc(&backend)
@@ -152,7 +151,7 @@ func test_ingrest_binlog(src *base.Spec, dest *base.Spec) {
case "commit":
test_commit(dest)
case "abort":
- panic("unkown abort action")
+ panic("unknown abort action")
case "ingest_be":
test_ingest_be()
default:
diff --git a/cmd/metrics/metrics_demo.go b/cmd/metrics/metrics_demo.go
new file mode 100644
index 00000000..5c1a9b93
--- /dev/null
+++ b/cmd/metrics/metrics_demo.go
@@ -0,0 +1,31 @@
+package main
+
+import (
+ "log"
+ "net/http"
+ "time"
+
+ "github.com/hashicorp/go-metrics"
+ prometheussink "github.com/hashicorp/go-metrics/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
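+// promHttp exposes the collected metrics through the Prometheus HTTP handler on :8080.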
+func promHttp() {
+ http.Handle("/metrics", promhttp.Handler())
+ log.Fatal(http.ListenAndServe(":8080", nil))
+}
+
+func main() {
+ go promHttp()
+ sink, _ := prometheussink.NewPrometheusSink()
+ metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
+ metrics.SetGauge([]string{"foo"}, 42)
+ metrics.EmitKey([]string{"bar"}, 30)
+ metrics.IncrCounter([]string{"baz"}, 42)
+ metrics.IncrCounter([]string{"baz"}, 1)
+ metrics.IncrCounter([]string{"baz"}, 80)
+ metrics.AddSample([]string{"method", "wow"}, 42)
+ metrics.AddSample([]string{"method", "wow"}, 100)
+ metrics.AddSample([]string{"method", "wow"}, 22)
+ time.Sleep(10000000 * time.Second)
+}
diff --git a/cmd/snapshot_op/snapshot_op.go b/cmd/snapshot_op/snapshot_op.go
index 43e52d63..15755820 100644
--- a/cmd/snapshot_op/snapshot_op.go
+++ b/cmd/snapshot_op/snapshot_op.go
@@ -103,7 +103,7 @@ func test_restore_snapshot(src *base.Spec, dest *base.Spec) {
if err != nil {
panic(err)
}
- restoreResp, err := destRpc.RestoreSnapshot(dest, nil, labelName, snapshotResp)
+ restoreResp, err := destRpc.RestoreSnapshot(dest, nil, labelName, snapshotResp, false, false)
if err != nil {
panic(err)
}
diff --git a/cmd/thrift_get_meta/thrift_get_meta.go b/cmd/thrift_get_meta/thrift_get_meta.go
new file mode 100644
index 00000000..7135ff2d
--- /dev/null
+++ b/cmd/thrift_get_meta/thrift_get_meta.go
@@ -0,0 +1,146 @@
+package main
+
+import (
+ "encoding/json"
+ "flag"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/selectdb/ccr_syncer/pkg/ccr"
+ "github.com/selectdb/ccr_syncer/pkg/ccr/base"
+ "github.com/selectdb/ccr_syncer/pkg/rpc"
+ "github.com/selectdb/ccr_syncer/pkg/utils"
+)
+
+var (
+ host string
+ port string
+ thriftPort string
+ user string
+ password string
+ dbName string
+ tableName string
+)
+
+func init() {
+ flag.StringVar(&host, "host", "localhost", "host")
+ flag.StringVar(&port, "port", "9030", "port")
+ flag.StringVar(&thriftPort, "thrift_port", "9020", "thrift port")
+ flag.StringVar(&user, "user", "root", "user")
+ flag.StringVar(&password, "password", "", "password")
+ flag.StringVar(&dbName, "db", "ccr", "database name")
+ flag.StringVar(&tableName, "table", "src_1", "table name")
+ flag.Parse()
+
+ utils.InitLog()
+}
+
+func test_get_table_meta(m ccr.Metaer, spec *base.Spec) {
+ if dbId, err := m.GetDbId(); err != nil {
+ panic(err)
+ } else {
+ spec.DbId = dbId
+ log.Infof("found db: %s, dbId: %d", spec.Database, dbId)
+ }
+
+ if tableId, err := m.GetTableId(spec.Table); err != nil {
+ panic(err)
+ } else {
+ spec.TableId = tableId
+ log.Infof("found table: %s, tableId: %d", spec.Table, tableId)
+ }
+
+ rpcFactory := rpc.NewRpcFactory()
+ feRpc, err := rpcFactory.NewFeRpc(spec)
+ if err != nil {
+ panic(err)
+ }
+
+ tableIds := make([]int64, 0)
+ tableIds = append(tableIds, spec.TableId)
+ result, err := feRpc.GetTableMeta(spec, tableIds)
+ if err != nil {
+ panic(err)
+ }
+ // toJson
+ s, err := json.Marshal(&result)
+ if err != nil {
+ panic(err)
+ }
+ log.Infof("found db meta: %s", s)
+
+ thriftMeta, err := ccr.NewThriftMeta(spec, rpcFactory, tableIds)
+ if err != nil {
+ panic(err)
+ }
+ log.Infof("found thrift meta: %+v", thriftMeta)
+}
+
+func test_get_db_meta(m ccr.Metaer, spec *base.Spec) {
+ if dbId, err := m.GetDbId(); err != nil {
+ panic(err)
+ } else {
+ spec.DbId = dbId
+ log.Infof("found db: %s, dbId: %d", spec.Database, dbId)
+ }
+
+ rpcFactory := rpc.NewRpcFactory()
+ feRpc, err := rpcFactory.NewFeRpc(spec)
+ if err != nil {
+ panic(err)
+ }
+
+ result, err := feRpc.GetDbMeta(spec)
+ if err != nil {
+ panic(err)
+ }
+ // toJson
+ s, err := json.Marshal(result)
+ if err != nil {
+ panic(err)
+ }
+ log.Infof("found db meta: %s", s)
+}
+
+func test_get_backends(m ccr.Metaer, spec *base.Spec) {
+ rpcFactory := rpc.NewRpcFactory()
+ feRpc, err := rpcFactory.NewFeRpc(spec)
+ if err != nil {
+ panic(err)
+ }
+
+ result, err := feRpc.GetBackends(spec)
+ if err != nil {
+ panic(err)
+ }
+ // toJson
+ s, err := json.Marshal(&result)
+ if err != nil {
+ panic(err)
+ }
+ log.Infof("found backends: %s", s)
+}
+
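+// main builds a Spec from the command line flags, then fetches table or database meta plus the backend list and logs them as JSON.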
+func main() {
+ src := &base.Spec{
+ Frontend: base.Frontend{
+ Host: host,
+ Port: port,
+ ThriftPort: thriftPort,
+ },
+ User: user,
+ Password: password,
+ Database: dbName,
+ Table: tableName,
+ }
+
+ metaFactory := ccr.NewMetaFactory()
+ meta := metaFactory.NewMeta(src)
+
+ if tableName != "" {
+ test_get_table_meta(meta, src)
+ } else {
+ test_get_db_meta(meta, src)
+ }
+ test_get_backends(meta, src)
+}
diff --git a/devtools/issue_test/priv.sh b/devtools/issue_test/priv.sh
new file mode 100755
index 00000000..6a750222
--- /dev/null
+++ b/devtools/issue_test/priv.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+curl -X POST -H "Content-Type: application/json" -d '{
+ "name": "priv_test",
+ "src": {
+ "host": "localhost",
+ "port": "9030",
+ "thrift_port": "9020",
+ "user": "etl",
+ "password": "etl%2023",
+ "database": "tmp",
+ "table": "ccr_test_src"
+ },
+ "dest": {
+ "host": "localhost",
+ "port": "9030",
+ "thrift_port": "9020",
+ "user": "etl",
+ "password": "etl%2023",
+ "database": "tmp",
+ "table": "ccr_test_dst"
+ }
+}' http://127.0.0.1:9190/create_ccr
diff --git a/devtools/test_ccr_db_table_alias.sh b/devtools/test_ccr_db_table_alias.sh
new file mode 100755
index 00000000..78879b89
--- /dev/null
+++ b/devtools/test_ccr_db_table_alias.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+curl -X POST -H "Content-Type: application/json" -d '{
+ "name": "ccr_db_table_alias",
+ "src": {
+ "host": "localhost",
+ "port": "9030",
+ "thrift_port": "9020",
+ "user": "root",
+ "password": "",
+ "database": "ccr",
+ "table": "src_1"
+ },
+ "dest": {
+ "host": "localhost",
+ "port": "9030",
+ "thrift_port": "9020",
+ "user": "root",
+ "password": "",
+ "database": "dccr",
+ "table": "src_1_alias"
+ }
+}' http://127.0.0.1:9190/create_ccr
diff --git a/devtools/test_ccr_many_rows.sh b/devtools/test_ccr_many_rows.sh
new file mode 100755
index 00000000..cf001c7c
--- /dev/null
+++ b/devtools/test_ccr_many_rows.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+curl -X POST -H "Content-Type: application/json" -d '{
+ "name": "ccr_table_many_rows",
+ "src": {
+ "host": "localhost",
+ "port": "9030",
+ "thrift_port": "9020",
+ "user": "root",
+ "password": "",
+ "database": "ccr",
+ "table": "many"
+ },
+ "dest": {
+ "host": "localhost",
+ "port": "9030",
+ "thrift_port": "9020",
+ "user": "root",
+ "password": "",
+ "database": "ccr",
+ "table": "many_alias"
+ }
+}' http://127.0.0.1:9190/create_ccr
diff --git a/devtools/test_ccr_table_alias.sh b/devtools/test_ccr_table_alias.sh
index e11857d0..e3582def 100755
--- a/devtools/test_ccr_table_alias.sh
+++ b/devtools/test_ccr_table_alias.sh
@@ -19,5 +19,6 @@ curl -X POST -H "Content-Type: application/json" -d '{
"password": "",
"database": "ccr",
"table": "src_1_alias"
- }
+ },
+ "skip_error": false
}' http://127.0.0.1:9190/create_ccr
diff --git a/devtools/test_limit_speed.sh b/devtools/test_limit_speed.sh
new file mode 100755
index 00000000..9d9306c3
--- /dev/null
+++ b/devtools/test_limit_speed.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+curl -X POST -H "Content-Type: application/json" -d '{
+ "name": "test_speed_limit",
+ "src": {
+ "host": "localhost",
+ "port": "9030",
+ "thrift_port": "9020",
+ "user": "root",
+ "password": "",
+ "database": "ccr",
+ "table": "github_test_1"
+ },
+ "dest": {
+ "host": "localhost",
+ "port": "9030",
+ "thrift_port": "9020",
+ "user": "root",
+ "password": "",
+ "database": "dccr",
+ "table": "github_test_1_sync"
+ }
+}' http://127.0.0.1:9190/create_ccr
diff --git a/devtools/update_job.sh b/devtools/update_job.sh
new file mode 100755
index 00000000..3f0adc24
--- /dev/null
+++ b/devtools/update_job.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+curl -X POST -H "Content-Type: application/json" -d '{
+ "name": "ccr_test",
+ "skip": true
+}' http://127.0.0.1:9190/update_job
diff --git a/doc/operations.md b/doc/operations.md
index a202fda6..3007208f 100644
--- a/doc/operations.md
+++ b/doc/operations.md
@@ -1,39 +1,124 @@
# Syncer操作列表
+
### 请求的通用模板
+
```bash
curl -X POST -H "Content-Type: application/json" -d {json_body} http://ccr_syncer_host:ccr_syncer_port/operator
```
-json_body: 以json的格式发送操作所需信息
-operator:对应Syncer的不同操作
+- json_body: 以json的格式发送操作所需信息
+- operator:对应Syncer的不同操作
+
### operators
-- create_ccr
- 创建CCR任务,详见[README](../README.md)
-- get_lag
+
+- `create_ccr`
+  创建CCR任务,详见[README](../README.md),完整请求体示例见本节列表之后。
+- `get_lag`
查看同步进度
```bash
- curl -X POST -H "Content-Type: application/json" -d '{
+ curl -X POST -L --post303 -H "Content-Type: application/json" -d '{
"name": "job_name"
}' http://ccr_syncer_host:ccr_syncer_port/get_lag
```
其中job_name是create_ccr时创建的name
-- pause
+- `pause`
暂停同步任务
```bash
- curl -X POST -H "Content-Type: application/json" -d '{
+ curl -X POST -L --post303 -H "Content-Type: application/json" -d '{
"name": "job_name"
}' http://ccr_syncer_host:ccr_syncer_port/pause
```
-- resume
+- `resume`
恢复同步任务
```bash
- curl -X POST -H "Content-Type: application/json" -d '{
+ curl -X POST -L --post303 -H "Content-Type: application/json" -d '{
"name": "job_name"
}' http://ccr_syncer_host:ccr_syncer_port/resume
```
-- delete
+- `delete`
删除同步任务
```bash
- curl -X POST -H "Content-Type: application/json" -d '{
+ curl -X POST -L --post303 -H "Content-Type: application/json" -d '{
"name": "job_name"
}' http://ccr_syncer_host:ccr_syncer_port/delete
- ```
\ No newline at end of file
+ ```
+- `list_jobs`
+ 列出所有job名称
+ ```bash
+ curl -X POST -L --post303 -H "Content-Type: application/json" -d '{}' http://ccr_syncer_host:ccr_syncer_port/list_jobs
+ ```
+- `job_detail`
+ 展示job的详细信息
+ ```bash
+ curl -X POST -L --post303 -H "Content-Type: application/json" -d '{
+ "name": "job_name"
+ }' http://ccr_syncer_host:ccr_syncer_port/job_detail
+ ```
+- `job_progress`
+ 展示job的详细进度信息
+ ```bash
+ curl -X POST -L --post303 -H "Content-Type: application/json" -d '{
+ "name": "job_name"
+ }' http://ccr_syncer_host:ccr_syncer_port/job_progress
+ ```
+- `metrics`
+ 获取golang以及ccr job的metrics信息
+ ```bash
+ curl -L --post303 http://ccr_syncer_host:ccr_syncer_port/metrics
+ ```
+- `update_host_mapping`
+  更新上下游 FE/BE 集群 private ip 到 public ip 的映射;如果参数中的 public ip 为空,则删除对应 private ip 的映射
+ ```bash
+ curl -X POST -L --post303 -H "Content-Type: application/json" -d '{
+ "name": "job_name",
+ "src_host_mapping": {
+ "172.168.1.1": "10.0.10.1",
+ "172.168.1.2": "10.0.10.2",
+ "172.168.1.3": "10.0.10.3",
+ "172.168.1.5": ""
+ },
+ "dest_host_mapping": {
+ ...
+ }
+  }' http://ccr_syncer_host:ccr_syncer_port/update_host_mapping
+ ```
+ 更新上游 172.168.1.1-3 的映射,同时删除 172.168.1.5 的映射。
+ - `src_host_mapping`: 上游映射
+ - `dest_host_mapping`: 下游映射
+
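+下面是一个 `create_ccr` 请求体的最小示例(字段取值仅作示意,摘自 devtools 下的测试脚本,完整字段说明以 README 为准):
+
+```bash
+curl -X POST -H "Content-Type: application/json" -d '{
+    "name": "job_name",
+    "src": {
+        "host": "localhost",
+        "port": "9030",
+        "thrift_port": "9020",
+        "user": "root",
+        "password": "",
+        "database": "ccr",
+        "table": "src_1"
+    },
+    "dest": {
+        "host": "localhost",
+        "port": "9030",
+        "thrift_port": "9020",
+        "user": "root",
+        "password": "",
+        "database": "ccr",
+        "table": "src_1_alias"
+    }
+}' http://ccr_syncer_host:ccr_syncer_port/create_ccr
+```
+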
+### 一些特殊场景
+
+#### 上下游通过公网 IP 进行同步
+
+ccr syncer 支持将上下游部署到不同的网络环境中,并通过公网 IP 进行数据同步。
+
+具体方案:每个 job 会记录下上游 private IP 到 public IP 的映射关系(由用户提供),并在下游载入 binlog 前,将上游集群 FE/BE 的 private IP 转换成对应的 public IP。
+
+使用方式:创建 ccr job 时增加一个参数:
+```bash
+curl -X POST -H "Content-Type: application/json" -d '{
+ "name": "ccr_test",
+ "src": {
+ "host_mapping": {
+ "172.168.1.1": "10.0.10.1",
+ "172.168.1.2": "10.0.10.2",
+ "172.168.1.3": "10.0.10.3"
+ },
+ ...
+ },
+ "dest": {
+ "host_mapping": {
+ "172.168.2.3": "10.0.10.9",
+ "172.168.2.4": ""
+ },
+ ...
+  }
+}' http://127.0.0.1:9190/create_ccr
+```
+
+`host_mapping` 用法与 `/update_host_mapping` 接口一致。
+
+> 注意:即使增加了 host_mapping 字段,**src/dest 中的 host 字段仍需要设置为 public ip**。
+
+相关操作:
+- 修改/删除/增加新映射,使用 `/update_host_mapping` 接口
+- 查看 job 的所有映射,使用 `/job_detail` 接口
diff --git a/doc/pprof.md b/doc/pprof.md
new file mode 100644
index 00000000..d3377fad
--- /dev/null
+++ b/doc/pprof.md
@@ -0,0 +1,19 @@
+# pprof使用介绍
+
+## pprof简介
+pprof 是 golang 中用来分析性能的工具,pprof 有 4 种 profiling:
+
+1. CPU Profiling:CPU 性能分析
+2. Memory Profiling:程序的内存占用情况
+3. Block Profiling:goroutine 在等待共享资源上花费的时间
+4. Mutex Profiling:只记录因为锁竞争导致的等待或延迟
+
+目前 CCR 已经集成了 pprof,可以用来分析 CCR 的性能。
+
+## CCR中使用pprof的步骤
+1. 启动CCR进程时,可以通过 `sh shell/start_syncer.sh --pprof true --pprof_port 8080 --host x.x.x.x --daemon` 的方式打开pprof
+2. 在浏览器中打开 http://x.x.x.x:8080/debug/pprof/ 即可看到profiling
+3. 或者可以使用采样工具,通过更加图形化的方式来分析。在8080端口启动后,在ccr机器上执行
+   ```
+   go tool pprof -http=:9999 http://x.x.x.x:8080/debug/pprof/heap
+   ```
+   然后在浏览器打开 http://x.x.x.x:9999 即可看到图形化的采样信息。
+   此处需要注意的是,如果无法开通端口,可以使用如下命令将采样信息保存到文件中,再将文件拉到本地打开:
+   ```
+   curl http://localhost:8080/debug/pprof/heap?seconds=30 > heap.out
+   go tool pprof heap.out
+   ```
\ No newline at end of file
diff --git a/doc/run-regression-test-en.md b/doc/run-regression-test-en.md
new file mode 100644
index 00000000..72185fcd
--- /dev/null
+++ b/doc/run-regression-test-en.md
@@ -0,0 +1,70 @@
+# Regression Test Considerations
+## Steps to Run Tests
+### 1. Copy Test and CCR Interface Libraries
+The regression tests for CCR require the regression test framework from doris/regression-test. Therefore, when running tests, we need to move the tests and CCR interfaces to the doris/regression-test directory.
+
+Create a folder named ccr-syncer-test under the doris/regression-test/suites directory and copy the test files into this folder. Next, copy the files from ccr-syncer/regression-test/common to doris/regression-test/common. The framework for the tests is now set up.
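+
+A minimal sketch of these copy steps, assuming the doris and ccr-syncer repositories are checked out side by side (adjust the paths to your layout):
+```bash
+mkdir -p doris/regression-test/suites/ccr-syncer-test
+cp -r ccr-syncer/regression-test/suites/* doris/regression-test/suites/ccr-syncer-test/
+cp -r ccr-syncer/regression-test/common/* doris/regression-test/common/
+```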
+### 2. Configure regression-conf.groovy (doris)
+Add and configure the following in the configuration file based on the actual situation:
+```bash
+// JDBC configuration
+jdbcUrl = "jdbc:mysql://127.0.0.1:9030/?"
+targetJdbcUrl = "jdbc:mysql://127.0.0.1:9190/?"
+jdbcUser = "root"
+jdbcPassword = ""
+
+feSourceThriftAddress = "127.0.0.1:9220"
+feTargetThriftAddress = "127.0.0.1:9220"
+syncerAddress = "127.0.0.1:9190"
+feSyncerUser = "root"
+feSyncerPassword = ""
+feHttpAddress = "127.0.0.1:8330"
+
+// CCR configuration
+ccrDownstreamUrl = "jdbc:mysql://172.19.0.2:9131/?"
+
+ccrDownstreamUser = "root"
+
+ccrDownstreamPassword = ""
+
+ccrDownstreamFeThriftAddress = "127.0.0.1:9020"
+```
+### 3. Run the Tests
+Ensure that at least one BE and FE are deployed for Doris and that CCR-Syncer is deployed before running the tests.
+```bash
+# Run the tests using the Doris script
+# --Run test cases with suiteName sql_action, currently suiteName equals the prefix of the file name, the example corresponds to the test file sql_action.groovy
+./run-regression-test.sh --run sql_action
+```
+The steps to run the tests are now complete.
+## Steps to Write Test Cases
+### 1. Create Test Files
+Navigate to the ccr-syncer/regression-test/suites directory and create folders based on the synchronization level. For example, for the DB level, go to the db_sync folder. Further divide the folders based on the synchronization object. For example, for the column object, go to the column folder. Divide the folders based on the actions on the object. For example, for the rename action, create a rename folder. Create the test file in this folder with a name prefixed by test followed by the sequence of directories entered, e.g., test_ds_col_rename represents the synchronization test for renaming a column at the DB level.
+
+**Ensure there is only one test file in each smallest folder.**
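+
+For example, under this convention the DB-level column rename test would live at a path like (illustrative):
+```
+ccr-syncer/regression-test/suites/db_sync/column/rename/test_ds_col_rename.groovy
+```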
+### 2. Write the Test
+CCR Interface Explanation:
+```
+ // Enable Binlog
+ helper.enableDbBinlog()
+
+ // Functions for creating, deleting, pausing, and resuming tasks support an optional parameter.
+ // For example, to create a task. If empty, it defaults to creating a DB-level synchronization task with the target database as context.dbName.
+ helper.ccrJobCreate()
+
+ // If not empty, it creates a table-level synchronization task with the target database as context.dbName, target table as tableName.
+ helper.ccrJobCreate(tableName)
+
+ // Check if the SQL execution result matches the res_func function, where sql_type is "sql" (source cluster) or "target_sql" (target cluster), and time_out is the timeout duration.
+ helper.checkShowTimesOf(sql, res_func, time_out, sql_type)
+```
+**Notes**
+```
+1. Two clusters will be created during the test: SQL is sent to the upstream cluster, and target_sql is sent to the downstream cluster. Use target_sql for operations involving the target cluster.
+
+2. Ensure the source database is not empty when creating a task, otherwise, the task creation will fail.
+
+3. Perform checks on both upstream and downstream before and after modifying objects to ensure correctness.
+
+4. Ensure the length of the automatically created dbName does not exceed 64.
+```
\ No newline at end of file
diff --git a/doc/run-regression-test-zh.md b/doc/run-regression-test-zh.md
new file mode 100644
index 00000000..f2dcbda7
--- /dev/null
+++ b/doc/run-regression-test-zh.md
@@ -0,0 +1,68 @@
+# 回归测试注意事项
+## 运行测试的步骤
+### 1. 复制测试及 ccr 接口库
+CCR 的回归测试需要用到 doris/regression-test 的回归测试框架, 所以我们运行测试时需要将测试和 ccr 接口迁移到doris/regression-test 目录下
+在 doris/regression-test/suites 目录下建立文件夹 ccr-syncer-test, 将测试文件复制到此文件夹, 其次将 ccr-syncer/regression-test/common 下的文件复制到 doris/regression-test/common 目录下, 至此测试前的框架已经搭好
+### 2. 配置 regression-conf.groovy
+根据实际情况在配置文件中添加如下并配置 jdbc fe ccr
+```bash
+// Jdbc配置
+jdbcUrl = "jdbc:mysql://127.0.0.1:9030/?"
+targetJdbcUrl = "jdbc:mysql://127.0.0.1:9190/?"
+jdbcUser = "root"
+jdbcPassword = ""
+
+feSourceThriftAddress = "127.0.0.1:9020"
+feTargetThriftAddress = "127.0.0.1:9020"
+syncerAddress = "127.0.0.1:9190"
+feSyncerUser = "root"
+feSyncerPassword = ""
+feHttpAddress = "127.0.0.1:8030"
+
+// ccr配置
+ccrDownstreamUrl = "jdbc:mysql://172.19.0.2:9131/?"
+
+ccrDownstreamUser = "root"
+
+ccrDownstreamPassword = ""
+
+ccrDownstreamFeThriftAddress = "127.0.0.1:9020"
+```
+### 3. 运行测试
+在运行测试前确保 doris 至少一个 be, fe 部署完成, 确保 ccr-syncer 部署完成
+```bash
+# 使用 doris 脚本运行测试
+# --测试suiteName为sql_action的用例, 目前suiteName等于文件名前缀, 例子对应的用例文件是sql_action.groovy
+./run-regression-test.sh --run sql_action
+```
+至此运行测试的步骤已完成
+## 编写测试用例的步骤
+### 1. 创建测试文件
+进入 ccr-syncer/regression-test/suites 目录, 根据同步级别划分文件夹, 以db级别为例, 进入 db_sync 文件夹, 根据同步对象划分文件夹, 以 column 为例, 进入 column 文件夹, 根据对对象的行为划分文件夹, 以rename为例, 创建 rename 文件夹, 在此文件夹下创建测试, 文件名为 test 前缀加依次进入目录的顺序, 例如 test_ds_col_rename 代表在db级别下 rename column 的同步测试
+**确保在每个最小文件夹下只有一个测试文件**
+### 2. 编写测试
+ccr 接口说明
+```
+ // 开启Binlog
+ helper.enableDbBinlog()
+
+ // 创建、删除、暂停、恢复任务等函数支持一个可选参数。
+ // 以创建任务为例, 参数为 tableName, 参数为空时, 默认创建db级别同步任务, 目标数据库为context.dbName
+ helper.ccrJobCreate()
+
+ // 不为空时创建 tbl 级别同步任务, 目标数据库为context.dbName, 目标表为 tableName
+ helper.ccrJobCreate(tableName)
+
+ // 检测 sql 运行结果是否符合 res_func函数, sql_type 为 "sql" (源集群) 或 "target_sql" (目标集群), time_out 为超时时间
+ helper.checkShowTimesOf(sql, res_func, time_out, sql_type)
+```
+**注意事项**
+```
+1. 测试时会建两个集群, sql 发给上游集群, target_sql 发给下游集群, 涉及到目标集群的需要用 target_sql
+
+2. 创建任务时确保源数据库不为空, 否则创建任务会失败
+
+3. 在修改对象前后都需要对上下游进行 check 保证结果正确
+
+4. 确保测试自动创建的 dbName 的长度不超过 64
+```
\ No newline at end of file
diff --git a/doc/start_syncer.md b/doc/start_syncer.md
index e19cce8b..2a14b4fb 100644
--- a/doc/start_syncer.md
+++ b/doc/start_syncer.md
@@ -23,12 +23,12 @@ bash bin/start_syncer.sh --daemon
```
### --db_type
-Syncer目前能够使用两种数据库来保存自身的元数据,分别为`sqlite3`(对应本地存储)和`mysql`(本地或远端存储)
+Syncer目前能够使用三种数据库来保存自身的元数据,分别为`sqlite3`(对应本地存储)、`mysql` 和 `postgresql`(本地或远端存储)
```bash
bash bin/start_syncer.sh --db_type mysql
```
默认值为sqlite3
-在使用mysql存储元数据时,Syncer会使用`CREATE IF NOT EXISTS`来创建一个名为`ccr`的库,ccr相关的元数据表都会保存在其中
+在使用mysql或者postgresql存储元数据时,Syncer会使用`CREATE IF NOT EXISTS`来创建一个名为`ccr`的库,ccr相关的元数据表都会保存在其中
### --db_dir
**这个选项仅在db使用`sqlite3`时生效**
@@ -38,7 +38,7 @@ bash bin/start_syncer.sh --db_dir /path/to/ccr.db
```
默认路径为`SYNCER_OUTPUT_DIR/db`,文件名为`ccr.db`
### --db_host & db_port & db_user & db_password
-**这个选项仅在db使用`mysql`时生效**
+**这个选项仅在db使用`mysql`或者`postgresql`时生效**
```bash
bash bin/start_syncer.sh --db_host 127.0.0.1 --db_port 3306 --db_user root --db_password "qwe123456"
```
@@ -54,7 +54,7 @@ bash bin/start_syncer.sh --log_dir /path/to/ccr_syncer.log
```bash
bash bin/start_syncer.sh --log_level info
```
-日志的格式如下,其中hook只会在`log_level > info`的时候打印:
+日志格式示例如下:
```
# time level msg hooks
[2023-07-18 16:30:18] TRACE This is trace type. ccrName=xxx line=xxx
@@ -80,4 +80,32 @@ pid文件是stop_syncer.sh脚本用于关闭Syncer的凭据,里面保存了对
```bash
bash bin/start_syncer.sh --pid_dir /path/to/pids
```
-默认值为`SYNCER_OUTPUT_DIR/bin`
\ No newline at end of file
+默认值为`SYNCER_OUTPUT_DIR/bin`
+
+### --commit_txn_timeout
+用于指定提交事务超时时间
+```bash
+bash bin/start_syncer.sh --commit_txn_timeout 33s
+```
+默认值为33s
+
+### --connect_timeout
+用于指定连接超时时间
+```bash
+bash bin/start_syncer.sh --connect_timeout 10s
+```
+默认值为1s
+
+### --local_repo_name
+用于指定本地仓库名称
+```bash
+bash bin/start_syncer.sh --local_repo_name "repo_name"
+```
+默认值为""
+
+### --rpc_timeout
+用于指定rpc超时时间
+```bash
+bash bin/start_syncer.sh --rpc_timeout 30s
+```
+默认值为3s
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 5e4dbb67..9958b4dc 100644
--- a/go.mod
+++ b/go.mod
@@ -3,73 +3,80 @@ module github.com/selectdb/ccr_syncer
go 1.20
require (
- github.com/apache/thrift v0.13.0
- github.com/cloudwego/kitex v0.6.2-0.20230814131251-645fec2e4585
- github.com/go-sql-driver/mysql v1.7.0
+ github.com/apache/thrift v0.19.0
+ github.com/cloudwego/kitex v0.8.0
+ github.com/go-sql-driver/mysql v1.7.1
+ github.com/hashicorp/go-metrics v0.5.3
github.com/keepeye/logrus-filename v0.0.0-20190711075016-ce01a4391dd1
- github.com/mattn/go-sqlite3 v1.14.17
+ github.com/mattn/go-sqlite3 v1.14.22
github.com/modern-go/gls v0.0.0-20220109145502-612d0167dce5
- github.com/pkg/errors v0.9.1
- github.com/sirupsen/logrus v1.9.0
+ github.com/prometheus/client_golang v1.18.0
+ github.com/sirupsen/logrus v1.9.3
github.com/stretchr/testify v1.8.4
github.com/t-tomalak/logrus-prefixed-formatter v0.5.2
- github.com/tidwall/btree v1.6.0
- go.uber.org/mock v0.2.0
- go.uber.org/zap v1.24.0
- golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df
+ github.com/tidwall/btree v1.7.0
+ go.uber.org/mock v0.4.0
+ golang.org/x/exp v0.0.0-20240213143201-ec583247a57a
gopkg.in/natefinch/lumberjack.v2 v2.2.1
)
// dependabot
-require golang.org/x/net v0.17.0 // indirect; https://github.com/selectdb/ccr-syncer/security/dependabot/2
+require golang.org/x/net v0.21.0 // indirect; https://github.com/selectdb/ccr-syncer/security/dependabot/2
require (
- github.com/bytedance/gopkg v0.0.0-20230728082804-614d0af6619b // indirect
- github.com/bytedance/sonic v1.9.1 // indirect
- github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
- github.com/chenzhuoyu/iasm v0.9.0 // indirect
- github.com/choleraehyq/pid v0.0.17 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/bufbuild/protocompile v0.8.0 // indirect
+ github.com/bytedance/gopkg v0.0.0-20240202110943-5e26950c5e57 // indirect
+ github.com/bytedance/sonic v1.11.0 // indirect
+ github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect
+ github.com/chenzhuoyu/iasm v0.9.1 // indirect
+ github.com/choleraehyq/pid v0.0.18 // indirect
github.com/cloudwego/configmanager v0.2.0 // indirect
- github.com/cloudwego/dynamicgo v0.1.2 // indirect
+ github.com/cloudwego/dynamicgo v0.2.0 // indirect
github.com/cloudwego/fastpb v0.0.4 // indirect
- github.com/cloudwego/frugal v0.1.7 // indirect
+ github.com/cloudwego/frugal v0.1.13 // indirect
github.com/cloudwego/localsession v0.0.2 // indirect
- github.com/cloudwego/netpoll v0.4.1 // indirect
- github.com/cloudwego/thriftgo v0.3.0 // indirect
+ github.com/cloudwego/netpoll v0.5.1 // indirect
+ github.com/cloudwego/thriftgo v0.3.6 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/fatih/structtag v1.2.0 // indirect
github.com/golang/protobuf v1.5.3 // indirect
- github.com/google/go-cmp v0.5.9 // indirect
- github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3 // indirect
- github.com/iancoleman/strcase v0.2.0 // indirect
- github.com/jhump/protoreflect v1.8.2 // indirect
+ github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect
+ github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
+ github.com/hashicorp/golang-lru v1.0.2 // indirect
+ github.com/iancoleman/strcase v0.3.0 // indirect
+ github.com/jhump/protoreflect v1.15.6 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/cpuid/v2 v2.2.4 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.6 // indirect
+ github.com/lib/pq v1.10.9 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
- github.com/mattn/go-isatty v0.0.16 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
- github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/oleiade/lane v1.0.1 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/gomega v1.27.8 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/tidwall/gjson v1.9.3 // indirect
+ github.com/prometheus/client_model v0.6.0 // indirect
+ github.com/prometheus/common v0.47.0 // indirect
+ github.com/prometheus/procfs v0.12.0 // indirect
+ github.com/tidwall/gjson v1.17.1 // indirect
github.com/tidwall/match v1.1.1 // indirect
- github.com/tidwall/pretty v1.2.0 // indirect
+ github.com/tidwall/pretty v1.2.1 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/x-cray/logrus-prefixed-formatter v0.5.2 // indirect
- go.uber.org/atomic v1.7.0 // indirect
- go.uber.org/multierr v1.6.0 // indirect
- golang.org/x/arch v0.2.0 // indirect
- golang.org/x/crypto v0.14.0 // indirect
- golang.org/x/sync v0.3.0 // indirect
- golang.org/x/sys v0.13.0 // indirect
- golang.org/x/term v0.13.0 // indirect
- golang.org/x/text v0.13.0 // indirect
- google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384 // indirect
- google.golang.org/protobuf v1.28.1 // indirect
+ golang.org/x/arch v0.7.0 // indirect
+ golang.org/x/crypto v0.19.0 // indirect
+ golang.org/x/sync v0.6.0 // indirect
+ golang.org/x/sys v0.17.0 // indirect
+ golang.org/x/term v0.17.0 // indirect
+ golang.org/x/text v0.14.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect
+ google.golang.org/grpc v1.60.1 // indirect
+ google.golang.org/protobuf v1.32.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index 402b0b4a..8e6bddf0 100644
--- a/go.sum
+++ b/go.sum
@@ -4,80 +4,106 @@ gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zum
git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/brianvoe/gofakeit/v6 v6.16.0/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8=
+github.com/bufbuild/protocompile v0.8.0 h1:9Kp1q6OkS9L4nM3FYbr8vlJnEwtbpDPQlQOVXfR+78s=
+github.com/bufbuild/protocompile v0.8.0/go.mod h1:+Etjg4guZoAqzVk2czwEQP12yaxLJ8DxuqCJ9qHdH94=
github.com/bytedance/gopkg v0.0.0-20220413063733-65bf48ffb3a7/go.mod h1:2ZlV9BaUH4+NXIBF0aMdKKAnHTzqH+iMU4KUjAbL23Q=
github.com/bytedance/gopkg v0.0.0-20220509134931-d1878f638986/go.mod h1:2ZlV9BaUH4+NXIBF0aMdKKAnHTzqH+iMU4KUjAbL23Q=
github.com/bytedance/gopkg v0.0.0-20220531084716-665b4f21126f/go.mod h1:2ZlV9BaUH4+NXIBF0aMdKKAnHTzqH+iMU4KUjAbL23Q=
github.com/bytedance/gopkg v0.0.0-20230531144706-a12972768317/go.mod h1:FtQG3YbQG9L/91pbKSw787yBQPutC+457AvDW77fgUQ=
-github.com/bytedance/gopkg v0.0.0-20230728082804-614d0af6619b h1:R6PWoQtxEMpWJPHnpci+9LgFxCS7iJCfOGBvCgZeTKI=
github.com/bytedance/gopkg v0.0.0-20230728082804-614d0af6619b/go.mod h1:FtQG3YbQG9L/91pbKSw787yBQPutC+457AvDW77fgUQ=
-github.com/bytedance/mockey v1.2.0 h1:847+X2fBSM4s/AIN4loO5d16PCgEj53j7Q8YVB+8P6c=
+github.com/bytedance/gopkg v0.0.0-20240202110943-5e26950c5e57 h1:lXHfN6aablmJUX76DO3BuathM5+9gftKx/iFv1RLqcg=
+github.com/bytedance/gopkg v0.0.0-20240202110943-5e26950c5e57/go.mod h1:FtQG3YbQG9L/91pbKSw787yBQPutC+457AvDW77fgUQ=
github.com/bytedance/mockey v1.2.0/go.mod h1:+Jm/fzWZAuhEDrPXVjDf/jLM2BlLXJkwk94zf2JZ3X4=
+github.com/bytedance/mockey v1.2.7 h1:8j4yCqS5OmMe2dQCxPit4FVkwTK9nrykIgbOZN3s28o=
+github.com/bytedance/mockey v1.2.7/go.mod h1:bNrUnI1u7+pAc0TYDgPATM+wF2yzHxmNH+iDXg4AOCU=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.8.8/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
-github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s=
-github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/bytedance/sonic v1.10.0-rc/go.mod h1:ElCzW+ufi8qKqNW0FY314xriJhyJhuoJ3gFZdAHF7NM=
+github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
+github.com/bytedance/sonic v1.11.0 h1:FwNNv6Vu4z2Onf1++LNzxB/QhitD8wuTdpZzMTGITWo=
+github.com/bytedance/sonic v1.11.0/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
-github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
+github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d h1:77cEq6EriyTZ0g/qfRdp61a3Uu/AWrgIq2s0ClJV1g0=
+github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA=
github.com/chenzhuoyu/iasm v0.0.0-20220818063314-28c361dae733/go.mod h1:wOQ0nsbeOLa2awv8bUYFW/EHXbjQMlZ10fAlXDB2sz8=
github.com/chenzhuoyu/iasm v0.0.0-20230222070914-0b1b64b0e762/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
-github.com/chenzhuoyu/iasm v0.9.0 h1:9fhXjVzq5hUy2gkhhgHl95zG2cEAhw9OSGs8toWWAwo=
github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
+github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0=
+github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog=
github.com/choleraehyq/pid v0.0.13/go.mod h1:uhzeFgxJZWQsZulelVQZwdASxQ9TIPZYL4TPkQMtL/U=
github.com/choleraehyq/pid v0.0.15/go.mod h1:uhzeFgxJZWQsZulelVQZwdASxQ9TIPZYL4TPkQMtL/U=
github.com/choleraehyq/pid v0.0.16/go.mod h1:uhzeFgxJZWQsZulelVQZwdASxQ9TIPZYL4TPkQMtL/U=
-github.com/choleraehyq/pid v0.0.17 h1:BLBfHTllp2nRRbZ/cOFHKlx9oWJuMwKmp7GqB5d58Hk=
github.com/choleraehyq/pid v0.0.17/go.mod h1:uhzeFgxJZWQsZulelVQZwdASxQ9TIPZYL4TPkQMtL/U=
+github.com/choleraehyq/pid v0.0.18 h1:O7LLxPoOyt3YtonlCC8BmNrF9P6Hc8B509UOqlPSVhw=
+github.com/choleraehyq/pid v0.0.18/go.mod h1:uhzeFgxJZWQsZulelVQZwdASxQ9TIPZYL4TPkQMtL/U=
github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY=
github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic=
github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudwego/configmanager v0.2.0 h1:niVpVg+wQ+npNqnH3dup96SMbR02Pk+tNErubYCJqKo=
github.com/cloudwego/configmanager v0.2.0/go.mod h1:FLIQTjxsZRGjnmDhTttWQTy6f6DghPTatfBVOs2gQLk=
github.com/cloudwego/dynamicgo v0.1.0/go.mod h1:Mdsz0XGsIImi15vxhZaHZpspNChEmBMIiWkUfD6JDKg=
-github.com/cloudwego/dynamicgo v0.1.2 h1:t5KMzo/UkT002n3EvGI0Y6+Me73NGDzFI/AQlT1LQME=
-github.com/cloudwego/dynamicgo v0.1.2/go.mod h1:AdPqyFN+0+fc3iVSSWojDCnOGPkzH+T0rI65017GCUA=
+github.com/cloudwego/dynamicgo v0.1.6/go.mod h1:WzbIYLbhR4tjUhEMmRZRNIQXZu5J18oPurGDj5UmU9I=
+github.com/cloudwego/dynamicgo v0.2.0 h1:2mIqwYjS4TvjIov+dV5/y4OO33x/YMdfaeiRgXiineg=
+github.com/cloudwego/dynamicgo v0.2.0/go.mod h1:zTbRLRyBdP+OLalvkiwWPnvg84v1UungzT7iuL/2Qgc=
github.com/cloudwego/fastpb v0.0.3/go.mod h1:/V13XFTq2TUkxj2qWReV8MwfPC4NnPcy6FsrojnsSG0=
github.com/cloudwego/fastpb v0.0.4 h1:/ROVVfoFtpfc+1pkQLzGs+azjxUbSOsAqSY4tAAx4mg=
github.com/cloudwego/fastpb v0.0.4/go.mod h1:/V13XFTq2TUkxj2qWReV8MwfPC4NnPcy6FsrojnsSG0=
github.com/cloudwego/frugal v0.1.3/go.mod h1:b981ViPYdhI56aFYsoMjl9kv6yeqYSO+iEz2jrhkCgI=
github.com/cloudwego/frugal v0.1.6/go.mod h1:9ElktKsh5qd2zDBQ5ENhPSQV7F2dZ/mXlr1eaZGDBFs=
-github.com/cloudwego/frugal v0.1.7 h1:Ggyk8mk0WrhBlM4g4RJxdOcVWJl/Hxbd8NJ19J8My6c=
-github.com/cloudwego/frugal v0.1.7/go.mod h1:3VECBCSiTYwm3QApqHXjZB9NDH+8hUw7txxlr+6pPb4=
+github.com/cloudwego/frugal v0.1.12/go.mod h1:zFBA63ne4+Tz4qayRZFZf+ZVwGqTzb+1Xe3ZDCq+Wfc=
+github.com/cloudwego/frugal v0.1.13 h1:s2G93j/DqANEUnYpvdf3mz760yGdCGs5o3js7dNU4Ig=
+github.com/cloudwego/frugal v0.1.13/go.mod h1:zFBA63ne4+Tz4qayRZFZf+ZVwGqTzb+1Xe3ZDCq+Wfc=
github.com/cloudwego/kitex v0.3.2/go.mod h1:/XD07VpUD9VQWmmoepASgZ6iw//vgWikVA9MpzLC5i0=
github.com/cloudwego/kitex v0.4.4/go.mod h1:3FcH5h9Qw+dhRljSzuGSpWuThttA8DvK0BsL7HUYydo=
github.com/cloudwego/kitex v0.6.1/go.mod h1:zI1GBrjT0qloTikcCfQTgxg3Ws+yQMyaChEEOcGNUvA=
-github.com/cloudwego/kitex v0.6.2-0.20230814131251-645fec2e4585 h1:PHWx7esQA/VEsVJEPuNL8jFigLIfHQdug62BkagS4xI=
-github.com/cloudwego/kitex v0.6.2-0.20230814131251-645fec2e4585/go.mod h1:RVWi+MbiPzI0Gi7fz8KZp+zsxB1/pLJZkr4kEwAuX6k=
+github.com/cloudwego/kitex v0.8.0 h1:eL6Xb2vnHfOjvDqmPsvCuheDo513lOc1HG6hSHGiFyM=
+github.com/cloudwego/kitex v0.8.0/go.mod h1:5o98nYKp8GwauvA1hhJwTA3YQcPa8Nu5tx+2j+JjwoM=
github.com/cloudwego/localsession v0.0.2 h1:N9/IDtCPj1fCL9bCTP+DbXx3f40YjVYWcwkJG0YhQkY=
github.com/cloudwego/localsession v0.0.2/go.mod h1:kiJxmvAcy4PLgKtEnPS5AXed3xCiXcs7Z+KBHP72Wv8=
github.com/cloudwego/netpoll v0.2.4/go.mod h1:1T2WVuQ+MQw6h6DpE45MohSvDTKdy2DlzCx2KsnPI4E=
github.com/cloudwego/netpoll v0.3.1/go.mod h1:1T2WVuQ+MQw6h6DpE45MohSvDTKdy2DlzCx2KsnPI4E=
github.com/cloudwego/netpoll v0.4.0/go.mod h1:xVefXptcyheopwNDZjDPcfU6kIjZXZ4nY550k1yH9eQ=
-github.com/cloudwego/netpoll v0.4.1 h1:/pGsY7Rs09KqEXEniB9fcsEWfi1iY+66bKUO3/NO6hc=
-github.com/cloudwego/netpoll v0.4.1/go.mod h1:xVefXptcyheopwNDZjDPcfU6kIjZXZ4nY550k1yH9eQ=
+github.com/cloudwego/netpoll v0.5.1 h1:zDUF7xF0C97I10fGlQFJ4jg65khZZMUvSu/TWX44Ohc=
+github.com/cloudwego/netpoll v0.5.1/go.mod h1:xVefXptcyheopwNDZjDPcfU6kIjZXZ4nY550k1yH9eQ=
github.com/cloudwego/thriftgo v0.1.2/go.mod h1:LzeafuLSiHA9JTiWC8TIMIq64iadeObgRUhmVG1OC/w=
github.com/cloudwego/thriftgo v0.2.4/go.mod h1:8i9AF5uDdWHGqzUhXDlubCjx4MEfKvWXGQlMWyR0tM4=
github.com/cloudwego/thriftgo v0.2.7/go.mod h1:8i9AF5uDdWHGqzUhXDlubCjx4MEfKvWXGQlMWyR0tM4=
github.com/cloudwego/thriftgo v0.2.11/go.mod h1:dAyXHEmKXo0LfMCrblVEY3mUZsdeuA5+i0vF5f09j7E=
-github.com/cloudwego/thriftgo v0.3.0 h1:BBb9hVcqmu9p4iKUP/PSIaDB21Vfutgd7k2zgK37Q9Q=
-github.com/cloudwego/thriftgo v0.3.0/go.mod h1:AvH0iEjvKHu3cdxG7JvhSAaffkS4h2f4/ZxpJbm48W4=
+github.com/cloudwego/thriftgo v0.3.3/go.mod h1:29ukiySoAMd0vXMYIduAY9dph/7dmChvOS11YLotFb8=
+github.com/cloudwego/thriftgo v0.3.6 h1:gHHW8Ag3cAEQ/awP4emTJiRPr5yQjbANhcsmV8/Epbw=
+github.com/cloudwego/thriftgo v0.3.6/go.mod h1:29ukiySoAMd0vXMYIduAY9dph/7dmChvOS11YLotFb8=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
@@ -95,13 +121,19 @@ github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2H
github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
-github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
-github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -109,6 +141,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -128,27 +161,44 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3 h1:mpL/HvfIgIejhVwAfxBQkwEjlhP5o0O9RAeTAjpwzxc=
github.com/google/pprof v0.0.0-20220608213341-c488b8fa1db3/go.mod h1:gSuNB+gJaOiQKLEZ+q+PK9Mq3SOzhRcw2GsGS/FhYDk=
+github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo=
+github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-metrics v0.5.3 h1:M5uADWMOGCTUNU1YuC4hfknOeHNaX54LDm4oYSucoNE=
+github.com/hashicorp/go-metrics v0.5.3/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
+github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
+github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
-github.com/jhump/protoreflect v1.8.2 h1:k2xE7wcUomeqwY0LDCYA16y4WWfyTcMx5mKhk0d4ua0=
github.com/jhump/protoreflect v1.8.2/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg=
+github.com/jhump/protoreflect v1.15.6 h1:WMYJbw2Wo+KOWwZFvgY0jMoVHM6i4XIvRs2RcBj5VmI=
+github.com/jhump/protoreflect v1.15.6/go.mod h1:jCHoyYQIJnaabEYnbGwyo9hUqfyUMTbJw/tAut5t97E=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/keepeye/logrus-filename v0.0.0-20190711075016-ce01a4391dd1 h1:JL2rWnBX8jnbHHlLcLde3BBWs+jzqZvOmF+M3sXoNOE=
@@ -157,30 +207,40 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
-github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
+github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
-github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/gls v0.0.0-20220109145502-612d0167dce5 h1:uiS4zKYKJVj5F3ID+5iylfKPsEQmBEOucSD9Vgmn0i0=
github.com/modern-go/gls v0.0.0-20220109145502-612d0167dce5/go.mod h1:I8AX+yW//L8Hshx6+a1m3bYkwXkpsVjA2795vP4f4oQ=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
@@ -195,30 +255,56 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc=
github.com/onsi/gomega v1.27.8/go.mod h1:2J8vzI/s+2shY9XHRApDkdgPo1TKT7P2u6fXeJKFnNQ=
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
+github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.47.0 h1:p5Cz0FNHo7SnWOmWmoRozVcjEp0bIVU8cV7OShpjL1k=
+github.com/prometheus/common v0.47.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
-github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
-github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -231,14 +317,17 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
github.com/t-tomalak/logrus-prefixed-formatter v0.5.2 h1:m4hdfSF9f2R5imvZJzEzit4Sm9i12JgXEZCIrTTrBL4=
github.com/t-tomalak/logrus-prefixed-formatter v0.5.2/go.mod h1:koTBrtn4EvuRvh8ay81sCRdAqXhys32PXxMjJbe0FO0=
github.com/thrift-iterator/go v0.0.0-20190402154806-9b5a67519118/go.mod h1:60PRwE/TCI1UqLvn8v2pwAf6+yzTPLP/Ji5xaesWDqk=
-github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg=
-github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
-github.com/tidwall/gjson v1.9.3 h1:hqzS9wAHMO+KVBBkLxYdkEeeFHuqr95GfClRLKlgK0E=
+github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
+github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U=
+github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
-github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
+github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/v2pro/plz v0.0.0-20221028024117-e5f9aec5b631/go.mod h1:3gacX+hQo+xvl0vtLqCMufzxuNCwt4geAVOMt2LQYfE=
@@ -252,35 +341,30 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
-go.uber.org/mock v0.2.0 h1:TaP3xedm7JaAgScZO7tlvlKrqT0p7I6OsdGB5YNSMDU=
-go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
+go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
+go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
golang.org/x/arch v0.0.0-20201008161808-52c3e6f60cff/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.0.0-20220722155209-00200b7164a7/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
-golang.org/x/arch v0.2.0 h1:W1sUEHXiJTfjaFJ5SLo0N6lZn+0eO5gWD1MFeTGqQEY=
golang.org/x/arch v0.2.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc=
+golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
-golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
-golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME=
-golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
+golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE=
+golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -310,9 +394,11 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
@@ -325,28 +411,33 @@ golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
-golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
-golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
-golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -370,20 +461,22 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
-golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -422,14 +515,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384 h1:z+j74wi4yV+P7EtK9gPLGukOk7mFOy9wMQaC0wNb7eY=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.36.1 h1:cmUfbeGKnz9+2DD/UYsMQXeqbHZqZDs4eQwW0sFOpBY=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
+google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -443,11 +538,14 @@ google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX7
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
@@ -455,8 +553,10 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/pkg/ccr/base/backend.go b/pkg/ccr/base/backend.go
index 2878de17..cca459d0 100644
--- a/pkg/ccr/base/backend.go
+++ b/pkg/ccr/base/backend.go
@@ -3,17 +3,16 @@ package base
import "fmt"
type Backend struct {
- Id int64
- Host string
- HeartbeatPort uint16
- BePort uint16
- HttpPort uint16
- BrpcPort uint16
+ Id int64
+ Host string
+ BePort uint16
+ HttpPort uint16
+ BrpcPort uint16
}
// Backend Stringer
func (b *Backend) String() string {
- return fmt.Sprintf("Backend: {Id: %d, Host: %s, HeartbeatPort: %d, BePort: %d, HttpPort: %d, BrpcPort: %d}", b.Id, b.Host, b.HeartbeatPort, b.BePort, b.HttpPort, b.BrpcPort)
+ return fmt.Sprintf("Backend: {Id: %d, Host: %s, BePort: %d, HttpPort: %d, BrpcPort: %d}", b.Id, b.Host, b.BePort, b.HttpPort, b.BrpcPort)
}
func (b *Backend) GetHttpPortStr() string {
diff --git a/pkg/ccr/base/pool.go b/pkg/ccr/base/pool.go
index ddca4614..3ac77ad8 100644
--- a/pkg/ccr/base/pool.go
+++ b/pkg/ccr/base/pool.go
@@ -46,5 +46,3 @@ func GetMysqlDB(dsn string) (*sql.DB, error) {
return db, nil
}
}
-
-// TODO: 添加超时和Ping检测
diff --git a/pkg/ccr/base/spec.go b/pkg/ccr/base/spec.go
index 08ddfbae..94ddceba 100644
--- a/pkg/ccr/base/spec.go
+++ b/pkg/ccr/base/spec.go
@@ -3,21 +3,30 @@ package base
import (
"database/sql"
"fmt"
+ "regexp"
"strconv"
"strings"
"time"
_ "github.com/go-sql-driver/mysql"
+ "github.com/selectdb/ccr_syncer/pkg/ccr/record"
"github.com/selectdb/ccr_syncer/pkg/utils"
"github.com/selectdb/ccr_syncer/pkg/xerror"
+
log "github.com/sirupsen/logrus"
- "go.uber.org/zap"
)
+var ErrRestoreSignatureNotMatched = xerror.NewWithoutStack(xerror.Normal, "The signature is not matched, the table already exist but with different schema")
+var ErrBackupTableNotFound = xerror.NewWithoutStack(xerror.Normal, "backup table not found")
+var ErrBackupPartitionNotFound = xerror.NewWithoutStack(xerror.Normal, "backup partition not found")
+
const (
BACKUP_CHECK_DURATION = time.Second * 3
RESTORE_CHECK_DURATION = time.Second * 3
MAX_CHECK_RETRY_TIMES = 86400 // 3 day
+ SIGNATURE_NOT_MATCHED = "already exist but with different schema"
+
+ FE_CONFIG_ENABLE_RESTORE_SNAPSHOT_COMPRESSION = "enable_restore_snapshot_rpc_compression"
)
type BackupState int
@@ -94,13 +103,109 @@ func _parseRestoreState(state string) RestoreState {
}
}
+type RestoreInfo struct {
+ State RestoreState
+ StateStr string
+ Label string
+ Status string
+ Timestamp string
+ ReplicationNum int64
+ CreateTime string // e.g. 2024-10-22 06:29:27
+}
+
+func parseRestoreInfo(parser *utils.RowParser) (*RestoreInfo, error) {
+ restoreStateStr, err := parser.GetString("State")
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "parse restore State failed")
+ }
+
+ label, err := parser.GetString("Label")
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "parse restore Label failed")
+ }
+
+ restoreStatus, err := parser.GetString("Status")
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "parse restore Status failed")
+ }
+
+ timestamp, err := parser.GetString("Timestamp")
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "parse restore Timestamp failed")
+ }
+
+ replicationNum, err := parser.GetInt64("ReplicationNum")
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "parse restore ReplicationNum failed")
+ }
+
+ createTime, err := parser.GetString("CreateTime")
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "parse restore CreateTime failed")
+ }
+
+ info := &RestoreInfo{
+ State: _parseRestoreState(restoreStateStr),
+ StateStr: restoreStateStr,
+ Label: label,
+ Status: restoreStatus,
+ Timestamp: timestamp,
+ ReplicationNum: replicationNum,
+ CreateTime: createTime,
+ }
+ return info, nil
+}
+
+type BackupInfo struct {
+ State BackupState
+ StateStr string
+ SnapshotName string
+ Status string
+ CreateTime string // e.g. 2024-10-22 06:27:06
+}
+
+func parseBackupInfo(parser *utils.RowParser) (*BackupInfo, error) {
+ stateStr, err := parser.GetString("State")
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "parse backup State failed")
+ }
+
+ snapshotName, err := parser.GetString("SnapshotName")
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "parse backup SnapshotName failed")
+ }
+
+ createTime, err := parser.GetString("CreateTime")
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "parse backup CreateTime failed")
+ }
+
+ status, err := parser.GetString("Status")
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "parse backup Status failed")
+ }
+
+ info := &BackupInfo{
+ State: ParseBackupState(stateStr),
+ StateStr: stateStr,
+ SnapshotName: snapshotName,
+ CreateTime: createTime,
+ Status: status,
+ }
+ return info, nil
+}
+
type Frontend struct {
Host string `json:"host"`
Port string `json:"port"`
ThriftPort string `json:"thrift_port"`
+ IsMaster bool `json:"is_master"`
+}
+
+func (f *Frontend) String() string {
+ return fmt.Sprintf("host: %s, port: %s, thrift_port: %s, is_master: %v", f.Host, f.Port, f.ThriftPort, f.IsMaster)
}
-// TODO(Drogon): timeout config
type Spec struct {
// embed Frontend as current master frontend
Frontend
@@ -115,6 +220,9 @@ type Spec struct {
Table string `json:"table"`
TableId int64 `json:"table_id"`
+ // The mapping between host private and public IPs
+ HostMapping map[string]string `json:"host_mapping,omitempty"`
+
observers []utils.Observer[SpecEvent]
}
@@ -158,10 +266,6 @@ func (s *Spec) Valid() error {
return nil
}
-func (s *Spec) IsSameHostDB(dest *Spec) bool {
- return s.Host == dest.Host && s.Port == dest.Port && s.ThriftPort == dest.ThriftPort && s.Database == dest.Database
-}
-
func (s *Spec) connect(dsn string) (*sql.DB, error) {
return GetMysqlDB(dsn)
}
@@ -196,7 +300,7 @@ func (s *Spec) IsDatabaseEnableBinlog() (bool, error) {
}
var createDBString string
- query := fmt.Sprintf("SHOW CREATE DATABASE %s", s.Database)
+ query := fmt.Sprintf("SHOW CREATE DATABASE %s", utils.FormatKeywordName(s.Database))
rows, err := db.Query(query)
if err != nil {
return false, xerror.Wrap(err, xerror.Normal, query)
@@ -234,7 +338,7 @@ func (s *Spec) IsTableEnableBinlog() (bool, error) {
}
var createTableString string
- query := fmt.Sprintf("SHOW CREATE TABLE %s.%s", s.Database, s.Table)
+ query := fmt.Sprintf("SHOW CREATE TABLE %s.%s", utils.FormatKeywordName(s.Database), utils.FormatKeywordName(s.Table))
rows, err := db.Query(query)
if err != nil {
return false, xerror.Wrap(err, xerror.Normal, query)
@@ -256,13 +360,50 @@ func (s *Spec) IsTableEnableBinlog() (bool, error) {
return false, xerror.Wrap(err, xerror.Normal, query)
}
- log.Infof("table %s.%s create string: %s", s.Database, s.Table, createTableString)
+ log.Tracef("table %s.%s create string: %s", s.Database, s.Table, createTableString)
// check "binlog.enable" = "true" in create table string
binlogEnableString := `"binlog.enable" = "true"`
return strings.Contains(createTableString, binlogEnableString), nil
}
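+// IsEnableRestoreSnapshotCompression checks the FE config enable_restore_snapshot_rpc_compression and reports whether restore snapshot compression is enabled.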
+func (s *Spec) IsEnableRestoreSnapshotCompression() (bool, error) {
+ log.Debugf("check frontend enable restore snapshot compression")
+
+ db, err := s.Connect()
+ if err != nil {
+ return false, err
+ }
+
+ sql := fmt.Sprintf("SHOW FRONTEND CONFIG LIKE '%s'", FE_CONFIG_ENABLE_RESTORE_SNAPSHOT_COMPRESSION)
+ rows, err := db.Query(sql)
+ if err != nil {
+ return false, xerror.Wrap(err, xerror.Normal, "show frontend config failed")
+ }
+ defer rows.Close()
+
+ enableCompress := false
+ if rows.Next() {
+ rowParser := utils.NewRowParser()
+ if err := rowParser.Parse(rows); err != nil {
+ return false, xerror.Wrap(err, xerror.Normal, "parse show frontend config result failed")
+ }
+ value, err := rowParser.GetString("Value")
+ if err != nil {
+ return false, xerror.Wrap(err, xerror.Normal, "parse show frontend config Value failed")
+ }
+ enableCompress = strings.ToLower(value) == "true"
+ }
+
+ if err := rows.Err(); err != nil {
+ return false, xerror.Wrapf(err, xerror.Normal,
+ "check frontend enable restore snapshot compress, sql: %s", sql)
+ }
+
+ log.Debugf("frontend enable restore snapshot compression: %t", enableCompress)
+ return enableCompress, nil
+}
+
func (s *Spec) GetAllTables() ([]string, error) {
log.Debugf("get all tables in database %s", s.Database)
@@ -289,10 +430,122 @@ func (s *Spec) GetAllTables() ([]string, error) {
}
tables = append(tables, table)
}
+
+ if err := rows.Err(); err != nil {
+ return nil, xerror.Wrapf(err, xerror.Normal, "SHOW TABLES")
+ }
+
return tables, nil
}
-func (s *Spec) dropTable(table string) error {
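+// queryResult runs querySQL and collects the value of queryColumn from each returned row; errMsg is used when wrapping parse errors.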
+func (s *Spec) queryResult(querySQL string, queryColumn string, errMsg string) ([]string, error) {
+ db, err := s.ConnectDB()
+ if err != nil {
+ return nil, err
+ }
+
+ rows, err := db.Query(querySQL)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, querySQL+" failed")
+ }
+ defer rows.Close()
+
+ var results []string
+ for rows.Next() {
+ rowParser := utils.NewRowParser()
+ if err := rowParser.Parse(rows); err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, errMsg)
+ }
+ result, err := rowParser.GetString(queryColumn)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, errMsg)
+ }
+ results = append(results, result)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, xerror.Wrapf(err, xerror.Normal, "query result failed, sql: %s", querySQL)
+ }
+
+ return results, nil
+}
+
+func (s *Spec) GetAllViewsFromTable(tableName string) ([]string, error) {
+ log.Debugf("get all view from table %s", tableName)
+
+ var results []string
+ // first, query information_schema.tables with table_schema and table_type to get all view names
+ querySql := fmt.Sprintf("SELECT table_name FROM information_schema.tables WHERE table_schema = '%s' AND table_type = 'VIEW'", s.Database)
+ viewsFromQuery, err := s.queryResult(querySql, "table_name", "QUERY VIEWS")
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "query views from information schema failed")
+ }
+
+ // then check each view's create sql; if it references tableName, the view is wanted
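+ // the regex matches qualified references such as `internal`.`<db>`.`<table>` or `default_cluster:<db>`.`<table>` (Doris 2.0.x style)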
+ viewRegex := regexp.MustCompile("(`internal`\\.`\\w+`|`default_cluster:\\w+`)\\.`" + strings.TrimSpace(tableName) + "`")
+ for _, eachViewName := range viewsFromQuery {
+ showCreateViewSql := fmt.Sprintf("SHOW CREATE VIEW %s", eachViewName)
+ createViewSqlList, err := s.queryResult(showCreateViewSql, "Create View", "SHOW CREATE VIEW")
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "show create view failed")
+ }
+
+ // a view has only one create sql, so use createViewSqlList[0] as the only sql
+ if len(createViewSqlList) > 0 {
+ found := viewRegex.MatchString(createViewSqlList[0])
+ if found {
+ results = append(results, eachViewName)
+ }
+ }
+ }
+
+ log.Debugf("get view result is %s", results)
+ return results, nil
+}
+
+func (s *Spec) RenameTable(destTableName string, renameTable *record.RenameTable) error {
+ destTableName = utils.FormatKeywordName(destTableName)
+ // a RenameTable record may rename a table, a rollup, or a partition
+ var sql string
+ // ALTER TABLE table1 RENAME table2;
+ if renameTable.NewTableName != "" && renameTable.OldTableName != "" {
+ oldName := utils.FormatKeywordName(renameTable.OldTableName)
+ newName := utils.FormatKeywordName(renameTable.NewTableName)
+ sql = fmt.Sprintf("ALTER TABLE %s RENAME %s", oldName, newName)
+ }
+
+ // ALTER TABLE example_table RENAME ROLLUP rollup1 rollup2;
+ // if rename rollup, table name is unchanged
+ if renameTable.NewRollupName != "" && renameTable.OldRollupName != "" {
+ oldName := utils.FormatKeywordName(renameTable.OldRollupName)
+ newName := utils.FormatKeywordName(renameTable.NewRollupName)
+ sql = fmt.Sprintf("ALTER TABLE %s RENAME ROLLUP %s %s", destTableName, oldName, newName)
+ }
+
+ // ALTER TABLE example_table RENAME PARTITION p1 p2;
+ // if rename partition, table name is unchanged
+ if renameTable.NewPartitionName != "" && renameTable.OldPartitionName != "" {
+ oldName := utils.FormatKeywordName(renameTable.OldPartitionName)
+ newName := utils.FormatKeywordName(renameTable.NewPartitionName)
+ sql = fmt.Sprintf("ALTER TABLE %s RENAME PARTITION %s %s", destTableName, oldName, newName)
+ }
+ if sql == "" {
+ return xerror.Errorf(xerror.Normal, "rename sql is empty")
+ }
+
+ log.Infof("rename table sql: %s", sql)
+ return s.DbExec(sql)
+}
+
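+// RenameTableWithName renames the table oldName to newName via ALTER TABLE ... RENAME.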
+func (s *Spec) RenameTableWithName(oldName, newName string) error {
+ oldName = utils.FormatKeywordName(oldName)
+ newName = utils.FormatKeywordName(newName)
+ sql := fmt.Sprintf("ALTER TABLE %s RENAME %s", oldName, newName)
+ log.Infof("rename table sql: %s", sql)
+ return s.DbExec(sql)
+}
+
+func (s *Spec) dropTable(table string, force bool) error {
log.Infof("drop table %s.%s", s.Database, table)
db, err := s.Connect()
@@ -300,7 +553,11 @@ func (s *Spec) dropTable(table string) error {
return err
}
- sql := fmt.Sprintf("DROP TABLE %s.%s", s.Database, table)
+ suffix := ""
+ if force {
+ suffix = "FORCE"
+ }
+ sql := fmt.Sprintf("DROP TABLE %s.%s %s", utils.FormatKeywordName(s.Database), utils.FormatKeywordName(table), suffix)
_, err = db.Exec(sql)
if err != nil {
return xerror.Wrapf(err, xerror.Normal, "drop table %s.%s failed, sql: %s", s.Database, table, sql)
@@ -316,13 +573,13 @@ func (s *Spec) ClearDB() error {
return err
}
- sql := fmt.Sprintf("DROP DATABASE %s", s.Database)
+ sql := fmt.Sprintf("DROP DATABASE %s", utils.FormatKeywordName(s.Database))
_, err = db.Exec(sql)
if err != nil {
return xerror.Wrapf(err, xerror.Normal, "drop database %s failed", s.Database)
}
- if _, err = db.Exec("CREATE DATABASE " + s.Database); err != nil {
+ if _, err = db.Exec("CREATE DATABASE " + utils.FormatKeywordName(s.Database)); err != nil {
return xerror.Wrapf(err, xerror.Normal, "create database %s failed", s.Database)
}
return nil
@@ -336,26 +593,46 @@ func (s *Spec) CreateDatabase() error {
return nil
}
- if _, err = db.Exec("CREATE DATABASE IF NOT EXISTS " + s.Database); err != nil {
+ if _, err = db.Exec("CREATE DATABASE IF NOT EXISTS " + utils.FormatKeywordName(s.Database)); err != nil {
return xerror.Wrapf(err, xerror.Normal, "create database %s failed", s.Database)
}
return nil
}
-func (s *Spec) CreateTable(stmt string) error {
- db, err := s.Connect()
- if err != nil {
- return nil
+func (s *Spec) CreateTableOrView(createTable *record.CreateTable, srcDatabase string) error {
+ // Creating a table only occurs during db sync.
+ // When creating a view, the db name in the sql is the source db name; use the dest db name to create the view instead
+ createSql := createTable.Sql
+ if createTable.IsCreateView() {
+ log.Debugf("create view, use dest db name to replace source db name")
+
+ // replace `internal`.`source_db_name`. or `default_cluster:source_db_name`. with `internal`.`dest_db_name`.
+ originalNameNewStyle := "`internal`.`" + strings.TrimSpace(srcDatabase) + "`."
+ originalNameOldStyle := "`default_cluster:" + strings.TrimSpace(srcDatabase) + "`." // for Doris 2.0.x
+ replaceName := "`internal`.`" + strings.TrimSpace(s.Database) + "`."
+ createSql = strings.ReplaceAll(
+ strings.ReplaceAll(createSql, originalNameNewStyle, replaceName), originalNameOldStyle, replaceName)
+ log.Debugf("original create view sql is %s, after replace, now sql is %s", createTable.Sql, createSql)
}
- if _, err = db.Exec(stmt); err != nil {
- return xerror.Wrapf(err, xerror.Normal, "create table %s.%s failed", s.Database, s.Table)
+ // Compatible with doris 2.1.x, see apache/doris#44834 for details.
+ for strings.Contains(createSql, "MAXVALUEMAXVALUE") {
+ createSql = strings.Replace(createSql, "MAXVALUEMAXVALUE", "MAXVALUE, MAXVALUE", -1)
}
- return nil
+
+ log.Infof("create table or view sql: %s", createSql)
+
+ list := []string{}
+ if strings.Contains(createSql, "agg_state<") {
+ log.Infof("agg_state exists in the create table sql, set enable_agg_state=true")
+ list = append(list, "SET enable_agg_state=true")
+ }
+ list = append(list, createSql)
+ return s.DbExec(list...)
}
func (s *Spec) CheckDatabaseExists() (bool, error) {
- log.Debug("check database exist by spec", zap.String("spec", s.String()))
+ log.Debugf("check database exist by spec: %s", s.String())
db, err := s.Connect()
if err != nil {
return false, err
@@ -389,14 +666,19 @@ func (s *Spec) CheckDatabaseExists() (bool, error) {
// check table exits in database dir by spec
func (s *Spec) CheckTableExists() (bool, error) {
- log.Debug("check table exists by spec", zap.String("spec", s.String()))
+ log.Debugf("check table exist by spec: %s", s.String())
+ return s.CheckTableExistsByName(s.Table)
+}
+
+// check whether a table with the specified name exists in the database dir.
+func (s *Spec) CheckTableExistsByName(tableName string) (bool, error) {
db, err := s.Connect()
if err != nil {
return false, err
}
- sql := fmt.Sprintf("SHOW TABLES FROM %s LIKE '%s'", s.Database, s.Table)
+ sql := fmt.Sprintf("SHOW TABLES FROM %s LIKE '%s'", utils.FormatKeywordName(s.Database), tableName)
rows, err := db.Query(sql)
if err != nil {
return false, xerror.Wrapf(err, xerror.Normal, "show tables failed, sql: %s", sql)
@@ -421,8 +703,35 @@ func (s *Spec) CheckTableExists() (bool, error) {
return table != "", nil
}
+func (s *Spec) CancelRestoreIfExists(snapshotName string) error {
+ log.Debugf("cancel restore %s, db name: %s", snapshotName, s.Database)
+
+ db, err := s.Connect()
+ if err != nil {
+ return err
+ }
+
+ info, err := s.queryRestoreInfo(db, snapshotName)
+ if err != nil {
+ return err
+ }
+
+ if info == nil || info.State == RestoreStateCancelled || info.State == RestoreStateFinished {
+ return nil
+ }
+
+ sql := fmt.Sprintf("CANCEL RESTORE FROM %s", utils.FormatKeywordName(s.Database))
+ log.Infof("cancel restore %s, sql: %s", snapshotName, sql)
+ _, err = db.Exec(sql)
+ if err != nil {
+ return xerror.Wrapf(err, xerror.Normal, "cancel restore failed, sql: %s", sql)
+ }
+ return nil
+}
+
+// Create a full snapshot of the specified tables; if tables is empty, back up the entire database.
// mysql> BACKUP SNAPSHOT ccr.snapshot_20230605 TO `__keep_on_local__` ON ( src_1 ) PROPERTIES ("type" = "full");
-func (s *Spec) CreateSnapshotAndWaitForDone(tables []string) (string, error) {
+func (s *Spec) CreateSnapshot(snapshotName string, tables []string) error {
if tables == nil {
tables = make([]string, 0)
}
@@ -430,152 +739,346 @@ func (s *Spec) CreateSnapshotAndWaitForDone(tables []string) (string, error) {
tables = append(tables, s.Table)
}
- var snapshotName string
var tableRefs string
if len(tables) == 1 {
- // snapshot name format "ccrs_${table}_${timestamp}"
// table refs = table
- snapshotName = fmt.Sprintf("ccrs_%s_%s_%d", s.Database, s.Table, time.Now().Unix())
- tableRefs = tables[0]
+ tableRefs = utils.FormatKeywordName(tables[0])
} else {
- // snapshot name format "ccrs_${db}_${timestamp}"
// table refs = tables.join(", ")
- snapshotName = fmt.Sprintf("ccrs_%s_%d", s.Database, time.Now().Unix())
- tableRefs = strings.Join(tables, ", ")
+ tableRefs = "`" + strings.Join(tables, "`,`") + "`"
}
- log.Infof("create snapshot %s.%s", s.Database, snapshotName)
+ // the source is an empty db (table count is 0), so back up the entire database
+ if tableRefs == "``" {
+ tableRefs = ""
+ } else {
+ tableRefs = fmt.Sprintf("ON ( %s )", tableRefs)
+ }
db, err := s.Connect()
if err != nil {
- return "", err
+ return err
}
- backupSnapshotSql := fmt.Sprintf("BACKUP SNAPSHOT %s.%s TO `__keep_on_local__` ON ( %s ) PROPERTIES (\"type\" = \"full\")", s.Database, snapshotName, tableRefs)
- log.Debugf("backup snapshot sql: %s", backupSnapshotSql)
+ backupSnapshotSql := fmt.Sprintf("BACKUP SNAPSHOT %s.%s TO `__keep_on_local__` %s PROPERTIES (\"type\" = \"full\")",
+ utils.FormatKeywordName(s.Database), utils.FormatKeywordName(snapshotName), tableRefs)
+ log.Infof("create snapshot %s.%s, backup snapshot sql: %s", s.Database, snapshotName, backupSnapshotSql)
_, err = db.Exec(backupSnapshotSql)
if err != nil {
- return "", xerror.Wrapf(err, xerror.Normal, "backup snapshot %s failed, sql: %s", snapshotName, backupSnapshotSql)
+ return xerror.Wrapf(err, xerror.Normal, "backup snapshot %s failed, sql: %s", snapshotName, backupSnapshotSql)
}
- backupFinished, err := s.CheckBackupFinished(snapshotName)
+ return nil
+}
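
As a hedged illustration of how CreateSnapshot assembles the BACKUP statement for zero, one, or many tables: the `quote` helper stands in for utils.FormatKeywordName (assumed here to wrap a name in backticks), and the database/table names are invented.

```go
package main

import (
	"fmt"
	"strings"
)

func quote(name string) string { return "`" + name + "`" }

// backupSQL reproduces the three cases above: an empty table list backs up the
// whole database, a single table gets a quoted ref, and multiple tables are
// joined into one ON (...) clause.
func backupSQL(db, snapshot string, tables []string) string {
	tableRefs := ""
	switch {
	case len(tables) == 0:
		// empty source db: back up the entire database, no ON clause
	case len(tables) == 1:
		tableRefs = fmt.Sprintf("ON ( %s )", quote(tables[0]))
	default:
		tableRefs = fmt.Sprintf("ON ( `%s` )", strings.Join(tables, "`,`"))
	}
	return fmt.Sprintf("BACKUP SNAPSHOT %s.%s TO `__keep_on_local__` %s PROPERTIES (\"type\" = \"full\")",
		quote(db), quote(snapshot), tableRefs)
}

func main() {
	fmt.Println(backupSQL("ccr", "snapshot_20230605", []string{"src_1", "src_2"}))
}
```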
+
+// mysql> BACKUP SNAPSHOT ccr.snapshot_20230605 TO `__keep_on_local__` ON (src_1 PARTITION (`p1`)) PROPERTIES ("type" = "full");
+func (s *Spec) CreatePartialSnapshot(snapshotName, table string, partitions []string) error {
+ if len(table) == 0 {
+ return xerror.Errorf(xerror.Normal, "source db is empty! you should have at least one table")
+ }
+
+ // table refs = table
+ tableRef := utils.FormatKeywordName(table)
+
+ log.Infof("create partial snapshot %s.%s", s.Database, snapshotName)
+
+ db, err := s.Connect()
if err != nil {
- return "", err
+ return err
}
- if !backupFinished {
- err = xerror.Errorf(xerror.Normal, "check backup state timeout, max try times: %d, sql: %s", MAX_CHECK_RETRY_TIMES, backupSnapshotSql)
- return "", err
+
+ partitionRefs := ""
+ if len(partitions) > 0 {
+ partitionRefs = " PARTITION (`" + strings.Join(partitions, "`,`") + "`)"
+ }
+ backupSnapshotSql := fmt.Sprintf(
+ "BACKUP SNAPSHOT %s.%s TO `__keep_on_local__` ON (%s%s) PROPERTIES (\"type\" = \"full\")",
+ utils.FormatKeywordName(s.Database), snapshotName, tableRef, partitionRefs)
+ log.Debugf("backup partial snapshot sql: %s", backupSnapshotSql)
+ _, err = db.Exec(backupSnapshotSql)
+ if err != nil {
+ if strings.Contains(err.Error(), "Unknown table") {
+ return ErrBackupTableNotFound
+ } else if strings.Contains(err.Error(), "Unknown partition") {
+ return ErrBackupPartitionNotFound
+ } else {
+ return xerror.Wrapf(err, xerror.Normal, "backup partial snapshot %s failed, sql: %s", snapshotName, backupSnapshotSql)
+ }
}
- return snapshotName, nil
+ return nil
}
// TODO: Add TaskErrMsg
-func (s *Spec) checkBackupFinished(snapshotName string) (BackupState, error) {
+func (s *Spec) checkBackupFinished(snapshotName string) (BackupState, string, error) {
log.Debugf("check backup state of snapshot %s", snapshotName)
db, err := s.Connect()
if err != nil {
- return BackupStateUnknown, err
+ return BackupStateUnknown, "", err
}
- sql := fmt.Sprintf("SHOW BACKUP FROM %s WHERE SnapshotName = \"%s\"", s.Database, snapshotName)
+ sql := fmt.Sprintf("SHOW BACKUP FROM %s WHERE SnapshotName = \"%s\"", utils.FormatKeywordName(s.Database), snapshotName)
log.Debugf("check backup state sql: %s", sql)
rows, err := db.Query(sql)
if err != nil {
- return BackupStateUnknown, xerror.Wrapf(err, xerror.Normal, "show backup failed, sql: %s", sql)
+ return BackupStateUnknown, "", xerror.Wrapf(err, xerror.Normal, "show backup failed, sql: %s", sql)
}
defer rows.Close()
- var backupStateStr string
if rows.Next() {
rowParser := utils.NewRowParser()
if err := rowParser.Parse(rows); err != nil {
- return BackupStateUnknown, xerror.Wrap(err, xerror.Normal, sql)
+ return BackupStateUnknown, "", xerror.Wrap(err, xerror.Normal, sql)
}
- backupStateStr, err = rowParser.GetString("State")
+
+ info, err := parseBackupInfo(rowParser)
if err != nil {
- return BackupStateUnknown, xerror.Wrap(err, xerror.Normal, sql)
+ return BackupStateUnknown, "", xerror.Wrap(err, xerror.Normal, sql)
}
- log.Infof("check snapshot %s backup state: [%v]", snapshotName, backupStateStr)
- return ParseBackupState(backupStateStr), nil
+ log.Infof("check snapshot %s backup state: [%v]", snapshotName, info.StateStr)
+ return info.State, info.Status, nil
}
- return BackupStateUnknown, xerror.Errorf(xerror.Normal, "no backup state found, sql: %s", sql)
+
+ if err := rows.Err(); err != nil {
+ return BackupStateUnknown, "", xerror.Wrapf(err, xerror.Normal, "check snapshot backup state, sql: %s", sql)
+ }
+
+ return BackupStateUnknown, "", xerror.Errorf(xerror.Normal, "no backup state found, sql: %s", sql)
}
func (s *Spec) CheckBackupFinished(snapshotName string) (bool, error) {
- log.Debug("check backup state", zap.String("database", s.Database))
+ log.Debugf("check backup state, spec: %s, snapshot: %s", s.String(), snapshotName)
- for i := 0; i < MAX_CHECK_RETRY_TIMES; i++ {
- if backupState, err := s.checkBackupFinished(snapshotName); err != nil {
- return false, err
- } else if backupState == BackupStateFinished {
- return true, nil
- } else if backupState == BackupStateCancelled {
- return false, xerror.Errorf(xerror.Normal, "backup failed or canceled")
- } else {
- // BackupStatePending, BackupStateUnknown
- time.Sleep(BACKUP_CHECK_DURATION)
+ // Retry network related errors to avoid a full sync when the target network is interrupted or the process is restarted.
+ if backupState, status, err := s.checkBackupFinished(snapshotName); err != nil && !isNetworkRelated(err) {
+ return false, err
+ } else if err == nil && backupState == BackupStateFinished {
+ return true, nil
+ } else if err == nil && backupState == BackupStateCancelled {
+ return false, xerror.Errorf(xerror.Normal, "backup failed or canceled, backup status: %s", status)
+ } else {
+ // BackupStatePending, BackupStateUnknown or network related errors.
+ if err != nil {
+ log.Warnf("check backup state is failed, spec: %s, snapshot: %s, err: %v", s.String(), snapshotName, err)
}
+ return false, nil
+ }
+}
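
A hypothetical caller-side sketch (names such as waitBackupDone, maxRetries, and fakeSpec are invented): with the new semantics, CheckBackupFinished reports pending states and transient network errors as (false, nil), so the polling loop and retry budget move to the caller instead of blocking inside the method.

```go
package main

import (
	"fmt"
	"time"
)

// backupChecker is a narrowed, hypothetical view of the Specer interface with
// only the method this sketch needs.
type backupChecker interface {
	CheckBackupFinished(snapshotName string) (bool, error)
}

// waitBackupDone polls until the backup finishes, a hard error is reported,
// or the retry budget is exhausted.
func waitBackupDone(spec backupChecker, snapshotName string, maxRetries int, interval time.Duration) error {
	for i := 0; i < maxRetries; i++ {
		done, err := spec.CheckBackupFinished(snapshotName)
		if err != nil {
			return err // cancelled backup or another non-retryable failure
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("backup %s did not finish after %d checks", snapshotName, maxRetries)
}

// fakeSpec finishes on the third poll, standing in for a real *Spec.
type fakeSpec struct{ polls int }

func (f *fakeSpec) CheckBackupFinished(string) (bool, error) {
	f.polls++
	return f.polls >= 3, nil
}

func main() {
	fmt.Println(waitBackupDone(&fakeSpec{}, "ccrs_demo_1", 10, 10*time.Millisecond))
}
```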
+
+// Get the valid (running or finished) backup job with a unique prefix to indicate
+// if a backup job needs to be issued again.
+func (s *Spec) GetValidBackupJob(snapshotNamePrefix string) (string, error) {
+ log.Debugf("get valid backup job if exists, database: %s, label prefix: %s", s.Database, snapshotNamePrefix)
+
+ db, err := s.Connect()
+ if err != nil {
+ return "", err
}
- return false, xerror.Errorf(xerror.Normal, "check backup state timeout, max try times: %d", MAX_CHECK_RETRY_TIMES)
+ query := fmt.Sprintf("SHOW BACKUP FROM %s WHERE SnapshotName LIKE \"%s%%\"",
+ utils.FormatKeywordName(s.Database), snapshotNamePrefix)
+ log.Infof("show backup state sql: %s", query)
+ rows, err := db.Query(query)
+ if err != nil {
+ return "", xerror.Wrap(err, xerror.Normal, "query backup state failed")
+ }
+ defer rows.Close()
+
+ labels := make([]string, 0)
+ for rows.Next() {
+ rowParser := utils.NewRowParser()
+ if err := rowParser.Parse(rows); err != nil {
+ return "", xerror.Wrap(err, xerror.Normal, "scan backup state failed")
+ }
+
+ info, err := parseBackupInfo(rowParser)
+ if err != nil {
+ return "", xerror.Wrap(err, xerror.Normal, "scan backup state failed")
+ }
+
+ log.Infof("check snapshot %s backup state [%v], create time: %s",
+ info.SnapshotName, info.StateStr, info.CreateTime)
+
+ if info.State == BackupStateCancelled {
+ continue
+ }
+
+ labels = append(labels, info.SnapshotName)
+ }
+
+ if err := rows.Err(); err != nil {
+ return "", xerror.Wrapf(err, xerror.Normal, "get valid backup job, sql: %s", query)
+ }
+
+ // Return the last one. Assume that the result of `SHOW BACKUP` is ordered by CreateTime in ascending order.
+ if len(labels) != 0 {
+ return labels[len(labels)-1], nil
+ }
+
+ return "", nil
}
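
A library-style usage sketch (the ensureSnapshot helper and the backupSpec interface are hypothetical, not part of the patch): the point of GetValidBackupJob is to make backups idempotent across syncer restarts by reusing a job whose label starts with a stable prefix.

```go
package ccrsketch

import (
	"fmt"
	"time"
)

// backupSpec is a narrowed, hypothetical view of Specer with just the two
// methods this sketch needs.
type backupSpec interface {
	GetValidBackupJob(snapshotNamePrefix string) (string, error)
	CreateSnapshot(snapshotName string, tables []string) error
}

// ensureSnapshot reuses a running or finished backup job whose label starts
// with the prefix (for example after a restart) and only issues a new BACKUP
// when no such job exists.
func ensureSnapshot(spec backupSpec, prefix string, tables []string) (string, error) {
	if name, err := spec.GetValidBackupJob(prefix); err != nil {
		return "", err
	} else if name != "" {
		return name, nil
	}
	name := fmt.Sprintf("%s_%d", prefix, time.Now().Unix())
	return name, spec.CreateSnapshot(name, tables)
}
```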
-// TODO: Add TaskErrMsg
-func (s *Spec) checkRestoreFinished(snapshotName string) (RestoreState, error) {
- log.Debugf("check restore state %s", snapshotName)
+// Get the valid (running or finished) restore job with a unique prefix to indicate
+// if a restore job needs to be issued again.
+func (s *Spec) GetValidRestoreJob(snapshotNamePrefix string) (string, error) {
+ log.Debugf("get valid restore job if exists, label prefix: %s", snapshotNamePrefix)
db, err := s.Connect()
if err != nil {
- return RestoreStateUnknown, err
+ return "", err
+ }
+
+ query := fmt.Sprintf("SHOW RESTORE FROM %s WHERE Label LIKE \"%s%%\"",
+ utils.FormatKeywordName(s.Database), snapshotNamePrefix)
+ log.Infof("show restore state sql: %s", query)
+ rows, err := db.Query(query)
+ if err != nil {
+ return "", xerror.Wrap(err, xerror.Normal, "query restore state failed")
+ }
+ defer rows.Close()
+
+ labels := make([]string, 0)
+ for rows.Next() {
+ rowParser := utils.NewRowParser()
+ if err := rowParser.Parse(rows); err != nil {
+ return "", xerror.Wrap(err, xerror.Normal, "scan restore state failed")
+ }
+
+ info, err := parseRestoreInfo(rowParser)
+ if err != nil {
+ return "", xerror.Wrap(err, xerror.Normal, "scan restore state failed")
+ }
+
+ log.Infof("check snapshot %s restore state: [%v], create time: %s",
+ info.Label, info.StateStr, info.CreateTime)
+
+ if info.State == RestoreStateCancelled {
+ continue
+ }
+
+ labels = append(labels, info.Label)
}
- query := fmt.Sprintf("SHOW RESTORE FROM %s WHERE Label = \"%s\"", s.Database, snapshotName)
+ if err := rows.Err(); err != nil {
+ return "", xerror.Wrapf(err, xerror.Normal, "get valid restore job, sql: %s", query)
+ }
- log.Debugf("check restore state sql: %s", query)
+ // Return the last one. Assume that the result of `SHOW RESTORE` is ordered by CreateTime in ascending order.
+ if len(labels) != 0 {
+ return labels[len(labels)-1], nil
+ }
+
+ return "", nil
+}
+
+// query restore info, return nil if not found
+func (s *Spec) queryRestoreInfo(db *sql.DB, snapshotName string) (*RestoreInfo, error) {
+ query := fmt.Sprintf("SHOW RESTORE FROM %s WHERE Label = \"%s\"",
+ utils.FormatKeywordName(s.Database), snapshotName)
+
+ log.Debugf("query restore info sql: %s", query)
rows, err := db.Query(query)
if err != nil {
- return RestoreStateUnknown, xerror.Wrap(err, xerror.Normal, "query restore state failed")
+ return nil, xerror.Wrap(err, xerror.Normal, "query restore state failed")
}
defer rows.Close()
- var restoreStateStr string
if rows.Next() {
rowParser := utils.NewRowParser()
if err := rowParser.Parse(rows); err != nil {
- return RestoreStateUnknown, xerror.Wrap(err, xerror.Normal, "scan restore state failed")
+ return nil, xerror.Wrap(err, xerror.Normal, "scan restore state failed")
}
- restoreStateStr, err = rowParser.GetString("State")
+
+ info, err := parseRestoreInfo(rowParser)
if err != nil {
- return RestoreStateUnknown, xerror.Wrap(err, xerror.Normal, "scan restore state failed")
+ return nil, xerror.Wrap(err, xerror.Normal, "scan restore state failed")
}
- log.Infof("check snapshot %s restore state: [%v]", snapshotName, restoreStateStr)
+ log.Infof("query snapshot %s restore state: [%v], restore status: %s",
+ snapshotName, info.StateStr, info.Status)
- return _parseRestoreState(restoreStateStr), nil
+ return info, nil
}
- return RestoreStateUnknown, xerror.Errorf(xerror.Normal, "no restore state found")
+
+ if err := rows.Err(); err != nil {
+ return nil, xerror.Wrapf(err, xerror.Normal, "query restore info, sql: %s", query)
+ }
+
+ return nil, nil
+}
+
+func (s *Spec) checkRestoreFinished(snapshotName string) (RestoreState, string, error) {
+ log.Debugf("check restore state %s", snapshotName)
+
+ db, err := s.Connect()
+ if err != nil {
+ return RestoreStateUnknown, "", err
+ }
+
+ info, err := s.queryRestoreInfo(db, snapshotName)
+ if err != nil {
+ return RestoreStateUnknown, "", err
+ }
+
+ if info == nil {
+ return RestoreStateUnknown, "", xerror.Errorf(xerror.Normal, "no restore state found")
+ }
+
+ return info.State, info.Status, nil
}
func (s *Spec) CheckRestoreFinished(snapshotName string) (bool, error) {
- log.Debug("check restore is finished", zap.String("spec", s.String()), zap.String("snapshot", snapshotName))
+ log.Debugf("check restore state is finished, spec: %s, snapshot: %s", s.String(), snapshotName)
+
+ // Retry network related errors to avoid a full sync when the target network is interrupted or the process is restarted.
+ if restoreState, status, err := s.checkRestoreFinished(snapshotName); err != nil && !isNetworkRelated(err) {
+ return false, err
+ } else if err == nil && restoreState == RestoreStateFinished {
+ return true, nil
+ } else if err == nil && restoreState == RestoreStateCancelled && strings.Contains(status, SIGNATURE_NOT_MATCHED) {
+ return false, xerror.XWrapf(ErrRestoreSignatureNotMatched, "restore failed, spec: %s, snapshot: %s, status: %s", s.String(), snapshotName, status)
+ } else if err == nil && restoreState == RestoreStateCancelled {
+ return false, xerror.Errorf(xerror.Normal, "restore failed or canceled, spec: %s, snapshot: %s, status: %s", s.String(), snapshotName, status)
+ } else {
+ // RestoreStatePending, RestoreStateUnknown or network error.
+ if err != nil {
+ log.Warnf("check restore state is failed, spec: %s, snapshot: %s, err: %v", s.String(), snapshotName, err)
+ }
+ return false, nil
+ }
+}
+
+func (s *Spec) GetRestoreSignatureNotMatchedTableOrView(snapshotName string) (string, bool, error) {
+ log.Debugf("get restore signature not matched table, spec: %s, snapshot: %s", s.String(), snapshotName)
for i := 0; i < MAX_CHECK_RETRY_TIMES; i++ {
- if backupState, err := s.checkRestoreFinished(snapshotName); err != nil {
- return false, err
- } else if backupState == RestoreStateFinished {
- return true, nil
- } else if backupState == RestoreStateCancelled {
- return false, xerror.Errorf(xerror.Normal, "backup failed or canceled, spec: %s, snapshot: %s", s.String(), snapshotName)
+ if restoreState, status, err := s.checkRestoreFinished(snapshotName); err != nil {
+ return "", false, err
+ } else if restoreState == RestoreStateFinished {
+ return "", false, nil
+ } else if restoreState == RestoreStateCancelled && strings.Contains(status, SIGNATURE_NOT_MATCHED) {
+ pattern := regexp.MustCompile("(?P<tableOrView>Table|View) (?P<tableName>.*) already exist but with different schema")
+ matches := pattern.FindStringSubmatch(status)
+ index := pattern.SubexpIndex("tableName")
+ if len(matches) == 0 || index == -1 || len(matches[index]) == 0 {
+ return "", false, xerror.Errorf(xerror.Normal, "match table name from restore status failed, spec: %s, snapshot: %s, status: %s", s.String(), snapshotName, status)
+ }
+
+ resource := matches[pattern.SubexpIndex("tableOrView")]
+ tableOrView := resource == "Table"
+ return matches[index], tableOrView, nil
+ } else if restoreState == RestoreStateCancelled {
+ return "", false, xerror.Errorf(xerror.Normal, "restore failed or canceled, spec: %s, snapshot: %s, status: %s", s.String(), snapshotName, status)
} else {
// RestoreStatePending, RestoreStateUnknown
time.Sleep(RESTORE_CHECK_DURATION)
}
}
- return false, xerror.Errorf(xerror.Normal, "check restore state timeout, max try times: %d, spec: %s, snapshot: %s", MAX_CHECK_RETRY_TIMES, s.String(), snapshotName)
+ log.Warnf("get restore signature not matched timeout, max try times: %d, spec: %s, snapshot: %s", MAX_CHECK_RETRY_TIMES, s, snapshotName)
+ return "", false, nil
}
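
A standalone demo of the "signature not matched" parsing above, run against a made-up restore Status string (the exact FE wording may differ):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	status := "[CANCELLED] Table sales_orders already exist but with different schema. Need to drop it first"
	pattern := regexp.MustCompile(`(?P<tableOrView>Table|View) (?P<tableName>.*) already exist but with different schema`)

	matches := pattern.FindStringSubmatch(status)
	if len(matches) == 0 {
		fmt.Println("no signature conflict found")
		return
	}
	name := matches[pattern.SubexpIndex("tableName")]
	isTable := matches[pattern.SubexpIndex("tableOrView")] == "Table"
	fmt.Printf("conflicting object: %s, is table: %v\n", name, isTable)
}
```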
func (s *Spec) waitTransactionDone(txnId int64) error {
@@ -589,7 +1092,7 @@ func (s *Spec) waitTransactionDone(txnId int64) error {
// WHERE
// [id=transaction_id]
// [label = label_name];
- query := fmt.Sprintf("SHOW TRANSACTION FROM %s WHERE id = %d", s.Database, txnId)
+ query := fmt.Sprintf("SHOW TRANSACTION FROM %s WHERE id = %d", utils.FormatKeywordName(s.Database), txnId)
log.Debugf("wait transaction done sql: %s", query)
rows, err := db.Query(query)
@@ -617,6 +1120,11 @@ func (s *Spec) waitTransactionDone(txnId int64) error {
return xerror.Errorf(xerror.Normal, "transaction %d status: %s", txnId, transactionStatus)
}
}
+
+ if err := rows.Err(); err != nil {
+ return xerror.Wrapf(err, xerror.Normal, "get transaction status failed, sql: %s", query)
+ }
+
return xerror.Errorf(xerror.Normal, "no transaction status found")
}
@@ -646,15 +1154,17 @@ func (s *Spec) Exec(sql string) error {
}
// Db Exec sql
-func (s *Spec) DbExec(sql string) error {
+func (s *Spec) DbExec(sqls ...string) error {
db, err := s.ConnectDB()
if err != nil {
return err
}
- _, err = db.Exec(sql)
- if err != nil {
- return xerror.Wrapf(err, xerror.Normal, "exec sql %s failed", sql)
+ for _, sql := range sqls {
+ _, err = db.Exec(sql)
+ if err != nil {
+ return xerror.Wrapf(err, xerror.Normal, "exec sql %s failed", sql)
+ }
}
return nil
}
@@ -701,3 +1211,294 @@ func (s *Spec) Update(event SpecEvent) {
break
}
}
+
+func (s *Spec) LightningSchemaChange(srcDatabase, tableAlias string, lightningSchemaChange *record.ModifyTableAddOrDropColumns) error {
+ log.Debugf("lightningSchemaChange %v", lightningSchemaChange)
+
+ rawSql := lightningSchemaChange.RawSql
+
+ // 1. remove database prefix
+ // "rawSql": "ALTER TABLE `default_cluster:ccr`.`test_ddl` ADD COLUMN `nid1` int(11) NULL COMMENT \"\""
+ // replace `default_cluster:${Src.Database}`.`test_ddl` with `test_ddl`
+ var sql string
+ if strings.Contains(rawSql, fmt.Sprintf("`default_cluster:%s`.", srcDatabase)) {
+ sql = strings.Replace(rawSql, fmt.Sprintf("`default_cluster:%s`.", srcDatabase), "", 1)
+ } else {
+ sql = strings.Replace(rawSql, fmt.Sprintf("`%s`.", srcDatabase), "", 1)
+ }
+
+ // 2. handle alias
+ if tableAlias != "" {
+ re := regexp.MustCompile("ALTER TABLE `[^`]*`")
+ sql = re.ReplaceAllString(sql, fmt.Sprintf("ALTER TABLE `%s`", tableAlias))
+ }
+
+ // 3. compatibility fix for REPLACE_IF_NOT_NULL NULL DEFAULT "null"
+ // See https://github.com/apache/doris/pull/41205 for details
+ sql = strings.Replace(sql, "REPLACE_IF_NOT_NULL NULL DEFAULT \"null\"",
+ "REPLACE_IF_NOT_NULL NULL DEFAULT NULL", 1)
+
+ log.Infof("lighting schema change sql, rawSql: %s, sql: %s", rawSql, sql)
+ return s.DbExec(sql)
+}
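
Not part of the patch: a runnable sketch of the rewrites LightningSchemaChange applies to the raw ALTER TABLE statement, using the sample rawSql from the comment above and an invented alias.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	rawSql := "ALTER TABLE `default_cluster:ccr`.`test_ddl` ADD COLUMN `nid1` int(11) NULL COMMENT \"\""
	srcDatabase, tableAlias := "ccr", "test_ddl_alias"

	// 1. strip the source database prefix
	sql := strings.Replace(rawSql, fmt.Sprintf("`default_cluster:%s`.", srcDatabase), "", 1)

	// 2. substitute the destination table alias
	re := regexp.MustCompile("ALTER TABLE `[^`]*`")
	sql = re.ReplaceAllString(sql, fmt.Sprintf("ALTER TABLE `%s`", tableAlias))

	fmt.Println(sql)
	// ALTER TABLE `test_ddl_alias` ADD COLUMN `nid1` int(11) NULL COMMENT ""
}
```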
+
+func (s *Spec) RenameColumn(destTableName string, renameColumn *record.RenameColumn) error {
+ renameSql := fmt.Sprintf("ALTER TABLE `%s` RENAME COLUMN `%s` `%s`",
+ destTableName, renameColumn.ColName, renameColumn.NewColName)
+ log.Infof("rename column sql: %s", renameSql)
+ return s.DbExec(renameSql)
+}
+
+func (s *Spec) ModifyComment(destTableName string, modifyComment *record.ModifyComment) error {
+ var modifySql string
+ if modifyComment.Type == "COLUMN" {
+ var sb strings.Builder
+ sb.WriteString(fmt.Sprintf("ALTER TABLE `%s` ", destTableName))
+ first := true
+ for col, comment := range modifyComment.ColToComment {
+ if !first {
+ sb.WriteString(", ")
+ }
+ sb.WriteString(fmt.Sprintf("MODIFY COLUMN `%s` COMMENT '%s'", col, utils.EscapeStringValue(comment)))
+ first = false
+ }
+ modifySql = sb.String()
+ } else if modifyComment.Type == "TABLE" {
+ modifySql = fmt.Sprintf("ALTER TABLE `%s` MODIFY COMMENT '%s'", destTableName, utils.EscapeStringValue(modifyComment.TblComment))
+ } else {
+ return xerror.Errorf(xerror.Normal, "unsupported modify comment type: %s", modifyComment.Type)
+ }
+
+ log.Infof("modify comment sql: %s", modifySql)
+ return s.DbExec(modifySql)
+}
+
+func (s *Spec) TruncateTable(destTableName string, truncateTable *record.TruncateTable) error {
+ var sql string
+ if truncateTable.RawSql == "" {
+ sql = fmt.Sprintf("TRUNCATE TABLE %s", utils.FormatKeywordName(destTableName))
+ } else {
+ sql = fmt.Sprintf("TRUNCATE TABLE %s %s", utils.FormatKeywordName(destTableName), truncateTable.RawSql)
+ }
+
+ log.Infof("truncate table sql: %s", sql)
+
+ return s.DbExec(sql)
+}
+
+func (s *Spec) ReplaceTable(fromName, toName string, swap bool) error {
+ sql := fmt.Sprintf("ALTER TABLE %s REPLACE WITH TABLE %s PROPERTIES(\"swap\"=\"%t\")",
+ utils.FormatKeywordName(toName), utils.FormatKeywordName(fromName), swap)
+
+ log.Infof("replace table sql: %s", sql)
+
+ return s.DbExec(sql)
+}
+
+func (s *Spec) DropTable(tableName string, force bool) error {
+ sqlSuffix := ""
+ if force {
+ sqlSuffix = "FORCE"
+ }
+ dropSql := fmt.Sprintf("DROP TABLE %s %s", utils.FormatKeywordName(tableName), sqlSuffix)
+ log.Infof("drop table sql: %s", dropSql)
+ return s.DbExec(dropSql)
+}
+
+func (s *Spec) DropView(viewName string) error {
+ dropView := fmt.Sprintf("DROP VIEW IF EXISTS %s ", utils.FormatKeywordName(viewName))
+ log.Infof("drop view sql: %s", dropView)
+ return s.DbExec(dropView)
+}
+
+func (s *Spec) AlterViewDef(srcDatabase, viewName string, alterView *record.AlterView) error {
+ // 1. remove database prefix
+ // CREATE VIEW `view_test_1159493057` AS
+ // SELECT
+ // `internal`.`regression_test_db_sync_view_alter`.`tbl_duplicate_0_1159493057`.`user_id` AS `k1`,
+ // `internal`.`regression_test_db_sync_view_alter`.`tbl_duplicate_0_1159493057`.`name` AS `name`,
+ // MAX(`internal`.`regression_test_db_sync_view_alter`.`tbl_duplicate_0_1159493057`.`age`) AS `v1`
+ // FROM `internal`.`regression_test_db_sync_view_alter`.`tbl_duplicate_0_1159493057`
+ var def string
+ prefix := fmt.Sprintf("`internal`.`%s`.", srcDatabase)
+ if strings.Contains(alterView.InlineViewDef, prefix) {
+ def = strings.ReplaceAll(alterView.InlineViewDef, prefix, "")
+ } else {
+ prefix = fmt.Sprintf(" `%s`.", srcDatabase)
+ def = strings.ReplaceAll(alterView.InlineViewDef, prefix, " ")
+ }
+
+ viewName = utils.FormatKeywordName(viewName)
+ alterViewSql := fmt.Sprintf("ALTER VIEW %s AS %s", viewName, def)
+ log.Infof("alter view sql: %s", alterViewSql)
+ return s.DbExec(alterViewSql)
+}
+
+func (s *Spec) AddPartition(destTableName string, addPartition *record.AddPartition) error {
+ addPartitionSql := addPartition.GetSql(destTableName)
+ addPartitionSql = correctAddPartitionSql(addPartitionSql, addPartition)
+ log.Infof("add partition sql: %s, original sql: %s", addPartitionSql, addPartition.Sql)
+ return s.DbExec(addPartitionSql)
+}
+
+func (s *Spec) DropPartition(destTableName string, dropPartition *record.DropPartition) error {
+ destTableName = utils.FormatKeywordName(destTableName)
+ dropPartitionSql := fmt.Sprintf("ALTER TABLE %s %s", destTableName, dropPartition.Sql)
+ log.Infof("drop partition sql: %s", dropPartitionSql)
+ return s.DbExec(dropPartitionSql)
+}
+
+func (s *Spec) RenamePartition(destTableName, oldPartition, newPartition string) error {
+ destTableName = utils.FormatKeywordName(destTableName)
+ oldPartition = utils.FormatKeywordName(oldPartition)
+ newPartition = utils.FormatKeywordName(newPartition)
+ renamePartitionSql := fmt.Sprintf("ALTER TABLE %s RENAME PARTITION %s %s",
+ destTableName, oldPartition, newPartition)
+ log.Infof("rename partition sql: %s", renamePartitionSql)
+ return s.DbExec(renamePartitionSql)
+}
+
+func (s *Spec) LightningIndexChange(alias string, record *record.ModifyTableAddOrDropInvertedIndices) error {
+ rawSql := record.GetRawSql()
+ if len(record.AlternativeIndexes) == 0 {
+ return xerror.Errorf(xerror.Normal, "lightning index change job is empty, should not be here")
+ }
+
+ sql := fmt.Sprintf("ALTER TABLE %s", utils.FormatKeywordName(alias))
+ if record.IsDropInvertedIndex {
+ dropIndexes := []string{}
+ for _, index := range record.AlternativeIndexes {
+ if !index.IsInvertedIndex() {
+ return xerror.Errorf(xerror.Normal, "lightning index change job is not inverted index, should not be here")
+ }
+ indexName := utils.FormatKeywordName(index.GetIndexName())
+ dropIndexes = append(dropIndexes, fmt.Sprintf("DROP INDEX %s", indexName))
+ }
+ sql = fmt.Sprintf("%s %s", sql, strings.Join(dropIndexes, ", "))
+ } else {
+ addIndexes := []string{}
+ for _, index := range record.AlternativeIndexes {
+ if !index.IsInvertedIndex() {
+ return xerror.Errorf(xerror.Normal, "lightning index change job is not inverted index, should not be here")
+ }
+ columns := index.GetColumns()
+ columnsRef := fmt.Sprintf("(`%s`)", strings.Join(columns, "`,`"))
+ indexName := utils.FormatKeywordName(index.GetIndexName())
+ addIndex := fmt.Sprintf("ADD INDEX %s %s USING INVERTED COMMENT '%s'",
+ indexName, columnsRef, index.GetComment())
+ addIndexes = append(addIndexes, addIndex)
+ }
+ sql = fmt.Sprintf("%s %s", sql, strings.Join(addIndexes, ", "))
+ }
+
+ log.Infof("lighting index change sql, rawSql: %s, sql: %s", rawSql, sql)
+ return s.DbExec(sql)
+}
+
+func (s *Spec) BuildIndex(tableAlias string, buildIndex *record.IndexChangeJob) error {
+ if buildIndex.IsDropOp {
+ return xerror.Errorf(xerror.Normal, "build index job is drop op, should not be here")
+ }
+
+ if len(buildIndex.Indexes) != 1 {
+ return xerror.Errorf(xerror.Normal, "build index job has more than one index, should not be here")
+ }
+
+ index := buildIndex.Indexes[0]
+ indexName := index.GetIndexName()
+ sql := fmt.Sprintf("BUILD INDEX %s ON %s",
+ utils.FormatKeywordName(indexName), utils.FormatKeywordName(tableAlias))
+
+ if buildIndex.PartitionName != "" {
+ sqlWithPart := fmt.Sprintf("%s PARTITION (%s)", sql, utils.FormatKeywordName(buildIndex.PartitionName))
+
+ log.Infof("build index sql: %s", sqlWithPart)
+ err := s.DbExec(sqlWithPart)
+ if err == nil {
+ return nil
+ } else if !strings.Contains(err.Error(), "is not partitioned, cannot build index with partitions") {
+ return err
+ }
+
+ log.Infof("table %s is not partitioned, try to build index without partition", tableAlias)
+ }
+
+ log.Infof("build index sql: %s", sql)
+ return s.DbExec(sql)
+}
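
A minimal sketch of the fallback in BuildIndex (the execFn type and helper name are hypothetical; FormatKeywordName is assumed to wrap names in backticks): try the partition-scoped statement first and retry without the PARTITION clause when the table turns out to be unpartitioned.

```go
package ccrsketch

import "strings"

// execFn stands in for Spec.DbExec in this sketch.
type execFn func(sql string) error

// buildIndexWithFallback first tries the partition-scoped BUILD INDEX and falls
// back to the whole-table statement when the error says the table is not
// partitioned.
func buildIndexWithFallback(exec execFn, sql, partition string) error {
	if partition != "" {
		err := exec(sql + " PARTITION (`" + partition + "`)")
		if err == nil {
			return nil
		}
		if !strings.Contains(err.Error(), "is not partitioned, cannot build index with partitions") {
			return err
		}
		// the table is unpartitioned; retry without the PARTITION clause
	}
	return exec(sql)
}
```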
+
+func (s *Spec) RenameRollup(destTableName, oldRollup, newRollup string) error {
+ destTableName = utils.FormatKeywordName(destTableName)
+ oldRollup = utils.FormatKeywordName(oldRollup)
+ newRollup = utils.FormatKeywordName(newRollup)
+ renameRollupSql := fmt.Sprintf("ALTER TABLE %s RENAME ROLLUP %s %s",
+ destTableName, oldRollup, newRollup)
+ log.Infof("rename rollup sql: %s", renameRollupSql)
+ return s.DbExec(renameRollupSql)
+}
+
+func (s *Spec) DropRollup(destTableName, rollup string) error {
+ destTableName = utils.FormatKeywordName(destTableName)
+ rollup = utils.FormatKeywordName(rollup)
+ dropRollupSql := fmt.Sprintf("ALTER TABLE %s DROP ROLLUP %s", destTableName, rollup)
+ log.Infof("drop rollup sql: %s", dropRollupSql)
+ return s.DbExec(dropRollupSql)
+}
+
+func (s *Spec) DesyncTables(tables ...string) error {
+ var err error
+
+ failedTables := []string{}
+ for _, table := range tables {
+ desyncSql := fmt.Sprintf("ALTER TABLE %s SET (\"is_being_synced\"=\"false\")", utils.FormatKeywordName(table))
+ log.Debugf("db exec sql: %s", desyncSql)
+ if err = s.DbExec(desyncSql); err != nil {
+ failedTables = append(failedTables, table)
+ }
+ }
+
+ if len(failedTables) > 0 {
+ return xerror.Wrapf(err, xerror.FE, "failed tables: %s", strings.Join(failedTables, ","))
+ }
+
+ return nil
+}
+
+// Determine whether the error is network related, e.g. connection refused or connection reset, as exposed by the net packages.
+func isNetworkRelated(err error) bool {
+ msg := err.Error()
+
+ // The below errors are exposed from net packages.
+ // See https://github.com/golang/go/issues/23827 for details.
+ return strings.Contains(msg, "timeout awaiting response headers") ||
+ strings.Contains(msg, "connection refused") ||
+ strings.Contains(msg, "connection reset by peer") ||
+ strings.Contains(msg, "connection timeouted") ||
+ strings.Contains(msg, "i/o timeout")
+}
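
A quick sanity sketch for the substring classification above; the sample error messages are illustrative, not an exhaustive list of what the net/http stack can surface.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// isNetworkRelated duplicates the helper above so the sketch is self-contained.
func isNetworkRelated(err error) bool {
	msg := err.Error()
	return strings.Contains(msg, "timeout awaiting response headers") ||
		strings.Contains(msg, "connection refused") ||
		strings.Contains(msg, "connection reset by peer") ||
		strings.Contains(msg, "connection timeouted") ||
		strings.Contains(msg, "i/o timeout")
}

func main() {
	cases := []error{
		errors.New("dial tcp 10.0.0.1:9030: connect: connection refused"),
		errors.New("read tcp 10.0.0.1:9030: i/o timeout"),
		errors.New("Unknown table 'tbl_1'"),
	}
	for _, err := range cases {
		fmt.Printf("%-55s network related: %v\n", err, isNetworkRelated(err))
	}
}
```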
+
+func correctAddPartitionSql(addPartitionSql string, addPartition *record.AddPartition) string {
+ // HACK:
+ //
+ // Doris versions before 2.1.3 and 2.0.10 did not handle unpartitioned and temporary
+ // partitions correctly, see https://github.com/apache/doris/pull/35461 for details.
+ //
+ // 1. fix unpartitioned add partition sql
+ // 2. support add temporary partition
+ if strings.Contains(addPartitionSql, "VALUES [(), ())") {
+ re := regexp.MustCompile(`VALUES \[\(\), \(\)\) \([^\)]+\)`)
+ addPartitionSql = re.ReplaceAllString(addPartitionSql, "")
+ }
+ if strings.Contains(addPartitionSql, "VALUES IN (((") {
+ re := regexp.MustCompile(`VALUES IN \(\(\((.*)\)\)\)`)
+ matches := re.FindStringSubmatch(addPartitionSql)
+ if len(matches) > 1 {
+ replace := fmt.Sprintf("VALUES IN ((%s))", matches[1])
+ addPartitionSql = re.ReplaceAllString(addPartitionSql, replace)
+ }
+ }
+ if addPartition.IsTemp && !strings.Contains(addPartitionSql, "ADD TEMPORARY PARTITION") {
+ addPartitionSql = strings.ReplaceAll(addPartitionSql, "ADD PARTITION", "ADD TEMPORARY PARTITION")
+ }
+ return addPartitionSql
+}
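
Not part of the patch: a runnable demo of the two rewrites in correctAddPartitionSql, applied to made-up ADD PARTITION statements (the real SQL comes from the AddPartition binlog record).

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// 1. Unpartitioned tables: strip the bogus "VALUES [(), ()) (...)" clause.
	sql := "ALTER TABLE `tbl` ADD PARTITION `tbl` VALUES [(), ()) (\"col\")"
	re := regexp.MustCompile(`VALUES \[\(\), \(\)\) \([^\)]+\)`)
	fmt.Println(re.ReplaceAllString(sql, ""))

	// 2. Temporary partitions: the binlog sql may lack the TEMPORARY keyword.
	sql = "ALTER TABLE `tbl` ADD PARTITION `tp1` VALUES LESS THAN (\"100\")"
	fmt.Println(strings.ReplaceAll(sql, "ADD PARTITION", "ADD TEMPORARY PARTITION"))
}
```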
diff --git a/pkg/ccr/base/specer.go b/pkg/ccr/base/specer.go
index fcfb55de..d90a2064 100644
--- a/pkg/ccr/base/specer.go
+++ b/pkg/ccr/base/specer.go
@@ -1,8 +1,7 @@
package base
import (
- "database/sql"
-
+ "github.com/selectdb/ccr_syncer/pkg/ccr/record"
"github.com/selectdb/ccr_syncer/pkg/utils"
)
@@ -13,24 +12,52 @@ const (
httpNotFoundEvent SpecEvent = 1
)
+// This interface is used for spec operations; treat it as a MySQL DAO.
type Specer interface {
Valid() error
- Connect() (*sql.DB, error)
- ConnectDB() (*sql.DB, error)
IsDatabaseEnableBinlog() (bool, error)
IsTableEnableBinlog() (bool, error)
+ IsEnableRestoreSnapshotCompression() (bool, error)
GetAllTables() ([]string, error)
+ GetAllViewsFromTable(tableName string) ([]string, error)
ClearDB() error
CreateDatabase() error
- CreateTable(stmt string) error
+ CreateTableOrView(createTable *record.CreateTable, srcDatabase string) error
CheckDatabaseExists() (bool, error)
CheckTableExists() (bool, error)
- CreateSnapshotAndWaitForDone(tables []string) (string, error)
+ CheckTableExistsByName(tableName string) (bool, error)
+ GetValidBackupJob(snapshotNamePrefix string) (string, error)
+ GetValidRestoreJob(snapshotNamePrefix string) (string, error)
+ CancelRestoreIfExists(snapshotName string) error
+ CreatePartialSnapshot(snapshotName, table string, partitions []string) error
+ CreateSnapshot(snapshotName string, tables []string) error
+ CheckBackupFinished(snapshotName string) (bool, error)
CheckRestoreFinished(snapshotName string) (bool, error)
+ GetRestoreSignatureNotMatchedTableOrView(snapshotName string) (string, bool, error)
WaitTransactionDone(txnId int64) // busy wait
- Exec(sql string) error
- DbExec(sql string) error
+ LightningSchemaChange(srcDatabase string, tableAlias string, changes *record.ModifyTableAddOrDropColumns) error
+ RenameColumn(destTableName string, renameColumn *record.RenameColumn) error
+ RenameTable(destTableName string, renameTable *record.RenameTable) error
+ RenameTableWithName(destTableName, newName string) error
+ ModifyComment(destTableName string, modifyComment *record.ModifyComment) error
+ TruncateTable(destTableName string, truncateTable *record.TruncateTable) error
+ ReplaceTable(fromName, toName string, swap bool) error
+ DropTable(tableName string, force bool) error
+ DropView(viewName string) error
+ AlterViewDef(srcDatabase, viewName string, alterView *record.AlterView) error
+
+ AddPartition(destTableName string, addPartition *record.AddPartition) error
+ DropPartition(destTableName string, dropPartition *record.DropPartition) error
+ RenamePartition(destTableName, oldPartition, newPartition string) error
+
+ LightningIndexChange(tableAlias string, changes *record.ModifyTableAddOrDropInvertedIndices) error
+ BuildIndex(tableAlias string, buildIndex *record.IndexChangeJob) error
+
+ RenameRollup(destTableName, oldRollup, newRollup string) error
+ DropRollup(destTableName, rollupName string) error
+
+ DesyncTables(tables ...string) error
utils.Subject[SpecEvent]
}
diff --git a/pkg/ccr/base/specer_factory.go b/pkg/ccr/base/specer_factory.go
index 7b6bddbe..574d4f49 100644
--- a/pkg/ccr/base/specer_factory.go
+++ b/pkg/ccr/base/specer_factory.go
@@ -4,8 +4,7 @@ type SpecerFactory interface {
NewSpecer(tableSpec *Spec) Specer
}
-type SpecFactory struct {
-}
+type SpecFactory struct{}
func NewSpecerFactory() SpecerFactory {
return &SpecFactory{}
diff --git a/pkg/ccr/checker.go b/pkg/ccr/checker.go
index 0ae1dbed..43320fbf 100644
--- a/pkg/ccr/checker.go
+++ b/pkg/ccr/checker.go
@@ -132,7 +132,7 @@ func (c *Checker) check() error {
c.reset()
for {
- log.Debugf("checker state: %s", c.state.String())
+ log.Tracef("checker state: %s", c.state)
switch c.state {
case checkerStateRefresh:
c.handleRefresh()
diff --git a/pkg/ccr/errors.go b/pkg/ccr/errors.go
index 78d475dc..2dd5ea31 100644
--- a/pkg/ccr/errors.go
+++ b/pkg/ccr/errors.go
@@ -2,6 +2,4 @@ package ccr
import "github.com/selectdb/ccr_syncer/pkg/xerror"
-var (
- errBackendNotFound = xerror.XNew(xerror.Meta, "backend not found")
-)
+var errBackendNotFound = xerror.NewWithoutStack(xerror.Meta, "backend not found")
diff --git a/pkg/ccr/factory.go b/pkg/ccr/factory.go
index 5c2f7d4b..13800c16 100644
--- a/pkg/ccr/factory.go
+++ b/pkg/ccr/factory.go
@@ -6,15 +6,17 @@ import (
)
type Factory struct {
- RpcFactory rpc.IRpcFactory
- MetaFactory MetaerFactory
- ISpecFactory base.SpecerFactory
+ rpc.IRpcFactory
+ MetaerFactory
+ base.SpecerFactory
+ ThriftMetaFactory
}
-func NewFactory(rpcFactory rpc.IRpcFactory, metaFactory MetaerFactory, ISpecFactory base.SpecerFactory) *Factory {
+func NewFactory(rpcFactory rpc.IRpcFactory, metaFactory MetaerFactory, ISpecFactory base.SpecerFactory, thriftMetaFactory ThriftMetaFactory) *Factory {
return &Factory{
- RpcFactory: rpcFactory,
- MetaFactory: metaFactory,
- ISpecFactory: ISpecFactory,
+ IRpcFactory: rpcFactory,
+ MetaerFactory: metaFactory,
+ SpecerFactory: ISpecFactory,
+ ThriftMetaFactory: thriftMetaFactory,
}
}
diff --git a/pkg/ccr/fe_mock.go b/pkg/ccr/fe_mock.go
index dc180c11..57ab1674 100644
--- a/pkg/ccr/fe_mock.go
+++ b/pkg/ccr/fe_mock.go
@@ -1,9 +1,9 @@
// Code generated by MockGen. DO NOT EDIT.
-// Source: rpc/fe.go
+// Source: pkg/rpc/fe.go
//
// Generated by this command:
//
-// mockgen -source=rpc/fe.go -destination=ccr/fe_mock.go -package=ccr
+// mockgen -source=pkg/rpc/fe.go -destination=pkg/ccr/fe_mock.go -package=ccr
//
// Package ccr is a generated GoMock package.
package ccr
@@ -13,6 +13,7 @@ import (
base "github.com/selectdb/ccr_syncer/pkg/ccr/base"
frontendservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
+ status "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status"
types "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types"
gomock "go.uber.org/mock/gomock"
)
@@ -40,6 +41,20 @@ func (m *MockIFeRpc) EXPECT() *MockIFeRpcMockRecorder {
return m.recorder
}
+// Address mocks base method.
+func (m *MockIFeRpc) Address() string {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Address")
+ ret0, _ := ret[0].(string)
+ return ret0
+}
+
+// Address indicates an expected call of Address.
+func (mr *MockIFeRpcMockRecorder) Address() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Address", reflect.TypeOf((*MockIFeRpc)(nil).Address))
+}
+
// BeginTransaction mocks base method.
func (m *MockIFeRpc) BeginTransaction(arg0 *base.Spec, arg1 string, arg2 []int64) (*frontendservice.TBeginTxnResult_, error) {
m.ctrl.T.Helper()
@@ -70,6 +85,21 @@ func (mr *MockIFeRpcMockRecorder) CommitTransaction(arg0, arg1, arg2 any) *gomoc
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitTransaction", reflect.TypeOf((*MockIFeRpc)(nil).CommitTransaction), arg0, arg1, arg2)
}
+// GetBackends mocks base method.
+func (m *MockIFeRpc) GetBackends(spec *base.Spec) (*frontendservice.TGetBackendMetaResult_, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetBackends", spec)
+ ret0, _ := ret[0].(*frontendservice.TGetBackendMetaResult_)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetBackends indicates an expected call of GetBackends.
+func (mr *MockIFeRpcMockRecorder) GetBackends(spec any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBackends", reflect.TypeOf((*MockIFeRpc)(nil).GetBackends), spec)
+}
+
// GetBinlog mocks base method.
func (m *MockIFeRpc) GetBinlog(arg0 *base.Spec, arg1 int64) (*frontendservice.TGetBinlogResult_, error) {
m.ctrl.T.Helper()
@@ -100,11 +130,26 @@ func (mr *MockIFeRpcMockRecorder) GetBinlogLag(arg0, arg1 any) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBinlogLag", reflect.TypeOf((*MockIFeRpc)(nil).GetBinlogLag), arg0, arg1)
}
+// GetDbMeta mocks base method.
+func (m *MockIFeRpc) GetDbMeta(spec *base.Spec) (*frontendservice.TGetMetaResult_, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetDbMeta", spec)
+ ret0, _ := ret[0].(*frontendservice.TGetMetaResult_)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetDbMeta indicates an expected call of GetDbMeta.
+func (mr *MockIFeRpcMockRecorder) GetDbMeta(spec any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDbMeta", reflect.TypeOf((*MockIFeRpc)(nil).GetDbMeta), spec)
+}
+
// GetMasterToken mocks base method.
-func (m *MockIFeRpc) GetMasterToken(arg0 *base.Spec) (string, error) {
+func (m *MockIFeRpc) GetMasterToken(arg0 *base.Spec) (*frontendservice.TGetMasterTokenResult_, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetMasterToken", arg0)
- ret0, _ := ret[0].(string)
+ ret0, _ := ret[0].(*frontendservice.TGetMasterTokenResult_)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -130,6 +175,21 @@ func (mr *MockIFeRpcMockRecorder) GetSnapshot(arg0, arg1 any) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSnapshot", reflect.TypeOf((*MockIFeRpc)(nil).GetSnapshot), arg0, arg1)
}
+// GetTableMeta mocks base method.
+func (m *MockIFeRpc) GetTableMeta(spec *base.Spec, tableIds []int64) (*frontendservice.TGetMetaResult_, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetTableMeta", spec, tableIds)
+ ret0, _ := ret[0].(*frontendservice.TGetMetaResult_)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetTableMeta indicates an expected call of GetTableMeta.
+func (mr *MockIFeRpcMockRecorder) GetTableMeta(spec, tableIds any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTableMeta", reflect.TypeOf((*MockIFeRpc)(nil).GetTableMeta), spec, tableIds)
+}
+
// RestoreSnapshot mocks base method.
func (m *MockIFeRpc) RestoreSnapshot(arg0 *base.Spec, arg1 []*frontendservice.TTableRef, arg2 string, arg3 *frontendservice.TGetSnapshotResult_) (*frontendservice.TRestoreSnapshotResult_, error) {
m.ctrl.T.Helper()
@@ -160,6 +220,71 @@ func (mr *MockIFeRpcMockRecorder) RollbackTransaction(spec, txnId any) *gomock.C
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RollbackTransaction", reflect.TypeOf((*MockIFeRpc)(nil).RollbackTransaction), spec, txnId)
}
+// MockresultType is a mock of resultType interface.
+type MockresultType struct {
+ ctrl *gomock.Controller
+ recorder *MockresultTypeMockRecorder
+}
+
+// MockresultTypeMockRecorder is the mock recorder for MockresultType.
+type MockresultTypeMockRecorder struct {
+ mock *MockresultType
+}
+
+// NewMockresultType creates a new mock instance.
+func NewMockresultType(ctrl *gomock.Controller) *MockresultType {
+ mock := &MockresultType{ctrl: ctrl}
+ mock.recorder = &MockresultTypeMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockresultType) EXPECT() *MockresultTypeMockRecorder {
+ return m.recorder
+}
+
+// GetMasterAddress mocks base method.
+func (m *MockresultType) GetMasterAddress() *types.TNetworkAddress {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetMasterAddress")
+ ret0, _ := ret[0].(*types.TNetworkAddress)
+ return ret0
+}
+
+// GetMasterAddress indicates an expected call of GetMasterAddress.
+func (mr *MockresultTypeMockRecorder) GetMasterAddress() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMasterAddress", reflect.TypeOf((*MockresultType)(nil).GetMasterAddress))
+}
+
+// GetStatus mocks base method.
+func (m *MockresultType) GetStatus() *status.TStatus {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetStatus")
+ ret0, _ := ret[0].(*status.TStatus)
+ return ret0
+}
+
+// GetStatus indicates an expected call of GetStatus.
+func (mr *MockresultTypeMockRecorder) GetStatus() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatus", reflect.TypeOf((*MockresultType)(nil).GetStatus))
+}
+
+// IsSetMasterAddress mocks base method.
+func (m *MockresultType) IsSetMasterAddress() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsSetMasterAddress")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsSetMasterAddress indicates an expected call of IsSetMasterAddress.
+func (mr *MockresultTypeMockRecorder) IsSetMasterAddress() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsSetMasterAddress", reflect.TypeOf((*MockresultType)(nil).IsSetMasterAddress))
+}
+
// MockRequest is a mock of Request interface.
type MockRequest struct {
ctrl *gomock.Controller
diff --git a/pkg/ccr/ingest_binlog_job.go b/pkg/ccr/ingest_binlog_job.go
index bac78a2b..0d96ab7b 100644
--- a/pkg/ccr/ingest_binlog_job.go
+++ b/pkg/ccr/ingest_binlog_job.go
@@ -18,17 +18,30 @@ import (
log "github.com/sirupsen/logrus"
)
+var errNotFoundDestMappingTableId = xerror.NewWithoutStack(xerror.Meta, "not found dest mapping table id")
+
type commitInfosCollector struct {
commitInfos []*ttypes.TTabletCommitInfo
commitInfosLock sync.Mutex
}
+type subTxnInfosCollector struct {
+ subTxnidToCommitInfos map[int64]([]*ttypes.TTabletCommitInfo)
+ subTxnInfosLock sync.Mutex
+}
+
func newCommitInfosCollector() *commitInfosCollector {
return &commitInfosCollector{
commitInfos: make([]*ttypes.TTabletCommitInfo, 0),
}
}
+func newSubTxnInfosCollector() *subTxnInfosCollector {
+ return &subTxnInfosCollector{
+ subTxnidToCommitInfos: make(map[int64]([]*ttypes.TTabletCommitInfo)),
+ }
+}
+
func (cic *commitInfosCollector) appendCommitInfos(commitInfo ...*ttypes.TTabletCommitInfo) {
cic.commitInfosLock.Lock()
defer cic.commitInfosLock.Unlock()
@@ -36,6 +49,23 @@ func (cic *commitInfosCollector) appendCommitInfos(commitInfo ...*ttypes.TTablet
cic.commitInfos = append(cic.commitInfos, commitInfo...)
}
+func (stic *subTxnInfosCollector) appendSubTxnCommitInfos(stid int64, commitInfo ...*ttypes.TTabletCommitInfo) {
+ stic.subTxnInfosLock.Lock()
+ defer stic.subTxnInfosLock.Unlock()
+
+ if stic.subTxnidToCommitInfos == nil {
+ stic.subTxnidToCommitInfos = make(map[int64]([]*ttypes.TTabletCommitInfo))
+ }
+
+ tabletCommitInfos := stic.subTxnidToCommitInfos[stid]
+ if tabletCommitInfos == nil {
+ tabletCommitInfos = make([]*ttypes.TTabletCommitInfo, 0)
+ }
+
+ tabletCommitInfos = append(tabletCommitInfos, commitInfo...)
+ stic.subTxnidToCommitInfos[stid] = tabletCommitInfos
+}
+
func (cic *commitInfosCollector) CommitInfos() []*ttypes.TTabletCommitInfo {
cic.commitInfosLock.Lock()
defer cic.commitInfosLock.Unlock()
@@ -43,38 +73,31 @@ func (cic *commitInfosCollector) CommitInfos() []*ttypes.TTabletCommitInfo {
return cic.commitInfos
}
+func (stic *subTxnInfosCollector) SubTxnToCommitInfos() map[int64]([]*ttypes.TTabletCommitInfo) {
+ stic.subTxnInfosLock.Lock()
+ defer stic.subTxnInfosLock.Unlock()
+
+ return stic.subTxnidToCommitInfos
+}
+
type tabletIngestBinlogHandler struct {
ingestJob *IngestBinlogJob
binlogVersion int64
+ stid int64
srcTablet *TabletMeta
destTablet *TabletMeta
destPartitionId int64
+ destTableId int64
*commitInfosCollector
-
- err error
- errLock sync.Mutex
+ *subTxnInfosCollector
cancel atomic.Bool
wg sync.WaitGroup
}
-func (h *tabletIngestBinlogHandler) setError(err error) {
- h.errLock.Lock()
- defer h.errLock.Unlock()
-
- h.err = err
-}
-
-func (h *tabletIngestBinlogHandler) error() error {
- h.errLock.Lock()
- defer h.errLock.Unlock()
-
- return h.err
-}
-
// handle Replica
-func (h *tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool {
+func (h *tabletIngestBinlogHandler) handleReplica(srcReplica, destReplica *ReplicaMeta) bool {
destReplicaId := destReplica.Id
log.Debugf("handle dest replica id: %d", destReplicaId)
@@ -84,6 +107,7 @@ func (h *tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool
}
j := h.ingestJob
+ destStid := h.stid
binlogVersion := h.binlogVersion
srcTablet := h.srcTablet
destPartitionId := h.destPartitionId
@@ -95,30 +119,28 @@ func (h *tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool
}
destTabletId := destReplica.TabletId
- destRpc, err := h.ingestJob.ccrJob.rpcFactory.NewBeRpc(destBackend)
+ destRpc, err := h.ingestJob.ccrJob.factory.NewBeRpc(destBackend)
if err != nil {
j.setError(err)
return false
}
- loadId := ttypes.NewTUniqueId()
- loadId.SetHi(-1)
- loadId.SetLo(-1)
-
- srcReplicas := srcTablet.ReplicaMetas
- // srcBackendIds := make([]int64, 0, srcReplicas.Len())
- iter := srcReplicas.Iter()
- if ok := iter.First(); !ok {
- j.setError(xerror.Errorf(xerror.Meta, "src replicas is empty"))
- return false
- }
- srcBackendId := iter.Value().BackendId
+ srcBackendId := srcReplica.BackendId
srcBackend := j.GetSrcBackend(srcBackendId)
if srcBackend == nil {
j.setError(xerror.XWrapf(errBackendNotFound, "backend id: %d", srcBackendId))
return false
}
+ loadId := ttypes.NewTUniqueId()
+ loadId.SetHi(-1)
+ loadId.SetLo(-1)
+
+ // for txn insert
+ txnId := j.txnId
+ if destStid != 0 {
+ txnId = destStid
+ }
req := &bestruct.TIngestBinlogRequest{
- TxnId: utils.ThriftValueWrapper(j.txnId),
+ TxnId: utils.ThriftValueWrapper(txnId),
RemoteTabletId: utils.ThriftValueWrapper[int64](srcTablet.Id),
BinlogVersion: utils.ThriftValueWrapper(binlogVersion),
RemoteHost: utils.ThriftValueWrapper(srcBackend.Host),
@@ -131,6 +153,7 @@ func (h *tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool
TabletId: destTabletId,
BackendId: destBackend.Id,
}
+ cwind := h.ingestJob.ccrJob.concurrencyManager.GetWindow(destBackend.Id)
h.wg.Add(1)
go func() {
@@ -140,6 +163,9 @@ func (h *tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool
gls.Set("job", j.ccrJob.Name)
defer gls.ResetGls(gls.GoID(), map[interface{}]interface{}{})
+ cwind.Acquire()
+ defer cwind.Release()
+
resp, err := destRpc.IngestBinlog(req)
if err != nil {
j.setError(err)
@@ -148,15 +174,20 @@ func (h *tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool
log.Debugf("ingest resp: %v", resp)
if !resp.IsSetStatus() {
- err = xerror.Errorf(xerror.BE, "ingest resp status not set")
+ err = xerror.Errorf(xerror.BE, "ingest resp status not set, req: %+v", req)
j.setError(err)
return
} else if resp.Status.StatusCode != tstatus.TStatusCode_OK {
- err = xerror.Errorf(xerror.BE, "ingest resp status code: %v, msg: %v", resp.Status.StatusCode, resp.Status.ErrorMsgs)
+ err = xerror.Errorf(xerror.BE, "ingest error, req %v, resp status code: %v, msg: %v", req, resp.Status.StatusCode, resp.Status.ErrorMsgs)
j.setError(err)
return
} else {
h.appendCommitInfos(commitInfo)
+
+ // for txn insert
+ if destStid != 0 {
+ h.appendSubTxnCommitInfos(destStid, commitInfo)
+ }
}
}()
@@ -166,30 +197,74 @@ func (h *tabletIngestBinlogHandler) handleReplica(destReplica *ReplicaMeta) bool
func (h *tabletIngestBinlogHandler) handle() {
log.Debugf("handle tablet ingest binlog, src tablet id: %d, dest tablet id: %d", h.srcTablet.Id, h.destTablet.Id)
+ // collect src replicas whose version >= binlogVersion
+ srcReplicas := make([]*ReplicaMeta, 0, h.srcTablet.ReplicaMetas.Len())
+ h.srcTablet.ReplicaMetas.Scan(func(srcReplicaId int64, srcReplica *ReplicaMeta) bool {
+ if srcReplica.Version >= h.binlogVersion {
+ srcReplicas = append(srcReplicas, srcReplica)
+ }
+ return true
+ })
+
+ if len(srcReplicas) == 0 {
+ h.ingestJob.setError(xerror.Errorf(xerror.Meta, "no src replica version > %d", h.binlogVersion))
+ return
+ }
+
+ srcReplicaIndex := 0
h.destTablet.ReplicaMetas.Scan(func(destReplicaId int64, destReplica *ReplicaMeta) bool {
- return h.handleReplica(destReplica)
+ // round robin
+ srcReplica := srcReplicas[srcReplicaIndex%len(srcReplicas)]
+ srcReplicaIndex++
+ return h.handleReplica(srcReplica, destReplica)
})
h.wg.Wait()
h.ingestJob.appendCommitInfos(h.CommitInfos()...)
+ // for txn insert
+ if h.stid != 0 {
+ commitInfos := h.SubTxnToCommitInfos()[h.stid]
+ h.ingestJob.appendSubTxnCommitInfos(h.stid, commitInfos...)
+ }
}
type IngestContext struct {
context.Context
txnId int64
tableRecords []*record.TableRecord
+ tableMapping map[int64]int64
+ stidMapping map[int64]int64
+}
+
+func NewIngestContext(txnId int64, tableRecords []*record.TableRecord, tableMapping map[int64]int64) *IngestContext {
+ return &IngestContext{
+ Context: context.Background(),
+ txnId: txnId,
+ tableRecords: tableRecords,
+ tableMapping: tableMapping,
+ }
}
-func NewIngestContext(txnId int64, tableRecords []*record.TableRecord) *IngestContext {
+func NewIngestContextForTxnInsert(txnId int64, tableRecords []*record.TableRecord,
+ tableMapping map[int64]int64, stidMapping map[int64]int64) *IngestContext {
return &IngestContext{
Context: context.Background(),
txnId: txnId,
tableRecords: tableRecords,
+ tableMapping: tableMapping,
+ stidMapping: stidMapping,
}
}
type IngestBinlogJob struct {
- ccrJob *Job // ccr job
+ ccrJob *Job // ccr job
+ factory *Factory
+
+ tableMapping map[int64]int64
+ srcMeta IngestBinlogMetaer
+ destMeta IngestBinlogMetaer
+ stidMap map[int64]int64
+
txnId int64
tableRecords []*record.TableRecord
@@ -199,6 +274,7 @@ type IngestBinlogJob struct {
tabletIngestJobs []*tabletIngestBinlogHandler
*commitInfosCollector
+ *subTxnInfosCollector
err error
errLock sync.RWMutex
@@ -214,11 +290,16 @@ func NewIngestBinlogJob(ctx context.Context, ccrJob *Job) (*IngestBinlogJob, err
}
return &IngestBinlogJob{
- ccrJob: ccrJob,
+ ccrJob: ccrJob,
+ factory: ccrJob.factory,
+
+ tableMapping: ingestCtx.tableMapping,
txnId: ingestCtx.txnId,
tableRecords: ingestCtx.tableRecords,
+ stidMap: ingestCtx.stidMapping,
commitInfosCollector: newCommitInfosCollector(),
+ subTxnInfosCollector: newSubTxnInfosCollector(),
}, nil
}
@@ -259,6 +340,7 @@ func (j *IngestBinlogJob) Error() error {
type prepareIndexArg struct {
binlogVersion int64
srcTableId int64
+ stid int64
srcPartitionId int64
destTableId int64
destPartitionId int64
@@ -271,14 +353,13 @@ func (j *IngestBinlogJob) prepareIndex(arg *prepareIndexArg) {
// Step 1: check tablets
log.Debugf("arg %+v", arg)
- job := j.ccrJob
- srcTablets, err := job.srcMeta.GetTablets(arg.srcTableId, arg.srcPartitionId, arg.srcIndexMeta.Id)
+ srcTablets, err := j.srcMeta.GetTablets(arg.srcTableId, arg.srcPartitionId, arg.srcIndexMeta.Id)
if err != nil {
j.setError(err)
return
}
- destTablets, err := job.destMeta.GetTablets(arg.destTableId, arg.destPartitionId, arg.destIndexMeta.Id)
+ destTablets, err := j.destMeta.GetTablets(arg.destTableId, arg.destPartitionId, arg.destIndexMeta.Id)
if err != nil {
j.setError(err)
return
@@ -312,12 +393,15 @@ func (j *IngestBinlogJob) prepareIndex(arg *prepareIndexArg) {
destTablet := destIter.Value()
tabletIngestBinlogHandler := &tabletIngestBinlogHandler{
ingestJob: j,
+ stid: arg.stid,
binlogVersion: arg.binlogVersion,
srcTablet: srcTablet,
destTablet: destTablet,
destPartitionId: arg.destPartitionId,
+ destTableId: arg.destTableId,
commitInfosCollector: newCommitInfosCollector(),
+ subTxnInfosCollector: newSubTxnInfosCollector(),
}
j.tabletIngestJobs = append(j.tabletIngestJobs, tabletIngestBinlogHandler)
@@ -335,7 +419,6 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, destTableId int64, partit
// Still require the index length to be aligned with the downstream; this cannot be recovered
// Think about which parts are used for recovery; it is mainly the tablet part
- // TODO(Drogon): add use Backup/Restore to handle this
if len(indexIds) == 0 {
j.setError(xerror.Errorf(xerror.Meta, "index ids is empty"))
return
@@ -345,19 +428,21 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, destTableId int64, partit
srcPartitionId := partitionRecord.Id
srcPartitionRange := partitionRecord.Range
- destPartitionId, err := job.destMeta.GetPartitionIdByRange(destTableId, srcPartitionRange)
+ sourceStid := partitionRecord.Stid
+ stidMap := j.stidMap
+ destPartitionId, err := j.destMeta.GetPartitionIdByRange(destTableId, srcPartitionRange)
if err != nil {
j.setError(err)
return
}
// Step 1: check index id
- srcIndexIdMap, err := j.ccrJob.srcMeta.GetIndexIdMap(srcTableId, srcPartitionId)
+ srcIndexIdMap, err := j.srcMeta.GetIndexIdMap(srcTableId, srcPartitionId)
if err != nil {
j.setError(err)
return
}
- destIndexNameMap, err := j.ccrJob.destMeta.GetIndexNameMap(destTableId, destPartitionId)
+ destIndexNameMap, destBaseIndex, err := j.destMeta.GetIndexNameMap(destTableId, destPartitionId)
if err != nil {
j.setError(err)
return
@@ -367,12 +452,22 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, destTableId int64, partit
srcIndexName := srcIndexMeta.Name
if ccrJob.SyncType == TableSync && srcIndexName == ccrJob.Src.Table {
return ccrJob.Dest.Table
+ } else if srcIndexMeta.IsBaseIndex {
+ return destBaseIndex.Name
} else {
return srcIndexName
}
}
for _, indexId := range indexIds {
+ if j.srcMeta.IsIndexDropped(indexId) {
+ continue
+ }
+ if featureFilterShadowIndexesUpsert {
+ if _, ok := j.ccrJob.progress.ShadowIndexes[indexId]; ok {
+ continue
+ }
+ }
srcIndexMeta, ok := srcIndexIdMap[indexId]
if !ok {
j.setError(xerror.Errorf(xerror.Meta, "index id %v not found in src meta", indexId))
@@ -380,8 +475,11 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, destTableId int64, partit
}
srcIndexName := getSrcIndexName(job, srcIndexMeta)
+ log.Debugf("src idx id %d, name %s", indexId, srcIndexName)
if _, ok := destIndexNameMap[srcIndexName]; !ok {
- j.setError(xerror.Errorf(xerror.Meta, "index name %v not found in dest meta", srcIndexName))
+ j.setError(xerror.Errorf(xerror.Meta,
+ "index name %v not found in dest meta, is base index: %t, src index id: %d",
+ srcIndexName, srcIndexMeta.IsBaseIndex, indexId))
return
}
}
@@ -390,11 +488,23 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, destTableId int64, partit
prepareIndexArg := prepareIndexArg{
binlogVersion: partitionRecord.Version,
srcTableId: srcTableId,
+ stid: stidMap[sourceStid],
srcPartitionId: srcPartitionId,
destTableId: destTableId,
destPartitionId: destPartitionId,
}
for _, indexId := range indexIds {
+ if j.srcMeta.IsIndexDropped(indexId) {
+ log.Infof("skip the dropped index %d", indexId)
+ continue
+ }
+ if featureFilterShadowIndexesUpsert {
+ if _, ok := j.ccrJob.progress.ShadowIndexes[indexId]; ok {
+ log.Infof("skip the shadow index %d", indexId)
+ continue
+ }
+ }
+
srcIndexMeta := srcIndexIdMap[indexId]
destIndexMeta := destIndexNameMap[getSrcIndexName(job, srcIndexMeta)]
prepareIndexArg.srcIndexMeta = srcIndexMeta
@@ -405,6 +515,10 @@ func (j *IngestBinlogJob) preparePartition(srcTableId, destTableId int64, partit
func (j *IngestBinlogJob) prepareTable(tableRecord *record.TableRecord) {
log.Debugf("tableRecord: %v", tableRecord)
+ if j.srcMeta.IsTableDropped(tableRecord.Id) {
+ log.Infof("skip the dropped table %d", tableRecord.Id)
+ return
+ }
if len(tableRecord.PartitionRecords) == 0 {
j.setError(xerror.Errorf(xerror.Meta, "partition records is empty"))
@@ -437,19 +551,21 @@ func (j *IngestBinlogJob) prepareTable(tableRecord *record.TableRecord) {
}
// Step 1: check all partitions in partition records are in src/dest cluster
- srcPartitionMap, err := job.srcMeta.GetPartitionRangeMap(srcTableId)
+ srcPartitionMap, err := j.srcMeta.GetPartitionRangeMap(srcTableId)
if err != nil {
j.setError(err)
return
}
- destPartitionMap, err := job.destMeta.GetPartitionRangeMap(destTableId)
+ destPartitionMap, err := j.destMeta.GetPartitionRangeMap(destTableId)
if err != nil {
j.setError(err)
return
}
for _, partitionRecord := range tableRecord.PartitionRecords {
+ if partitionRecord.IsTemp || j.srcMeta.IsPartitionDropped(partitionRecord.Id) {
+ continue
+ }
rangeKey := partitionRecord.Range
- // TODO(Improvment, Fix): this may happen after drop partition, can seek partition for more time, check from recycle bin
if _, ok := srcPartitionMap[rangeKey]; !ok {
err = xerror.Errorf(xerror.Meta, "partition range: %v not in src cluster", rangeKey)
j.setError(err)
@@ -464,6 +580,16 @@ func (j *IngestBinlogJob) prepareTable(tableRecord *record.TableRecord) {
// Step 2: prepare partitions
for _, partitionRecord := range tableRecord.PartitionRecords {
+ if partitionRecord.IsTemp {
+ log.Debugf("skip ingest binlog to an temp partition, id: %d range: %s, version: %d",
+ partitionRecord.Id, partitionRecord.Range, partitionRecord.Version)
+ continue
+ }
+ if j.srcMeta.IsPartitionDropped(partitionRecord.Id) {
+ log.Infof("skip the dropped partition %d, range: %s, version: %d",
+ partitionRecord.Id, partitionRecord.Range, partitionRecord.Version)
+ continue
+ }
j.preparePartition(srcTableId, destTableId, partitionRecord, tableRecord.IndexIds)
}
}
@@ -471,16 +597,14 @@ func (j *IngestBinlogJob) prepareTable(tableRecord *record.TableRecord) {
func (j *IngestBinlogJob) prepareBackendMap() {
log.Debug("prepareBackendMap")
- job := j.ccrJob
-
var err error
- j.srcBackendMap, err = job.srcMeta.GetBackendMap()
+ j.srcBackendMap, err = j.srcMeta.GetBackendMap()
if err != nil {
j.setError(err)
return
}
- j.destBackendMap, err = job.destMeta.GetBackendMap()
+ j.destBackendMap, err = j.destMeta.GetBackendMap()
if err != nil {
j.setError(err)
return
@@ -512,8 +636,68 @@ func (j *IngestBinlogJob) runTabletIngestJobs() {
j.wg.Wait()
}
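+// prepareMeta fetches src and dest metadata (via thrift) restricted to the table ids
+// involved in this ingest job and stores them in j.srcMeta and j.destMeta.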
+func (j *IngestBinlogJob) prepareMeta() {
+ log.Debug("prepareMeta")
+ srcTableIds := make([]int64, 0, len(j.tableRecords))
+ job := j.ccrJob
+ factory := j.factory
+
+ switch job.SyncType {
+ case DBSync:
+ for _, tableRecord := range j.tableRecords {
+ srcTableIds = append(srcTableIds, tableRecord.Id)
+ }
+ case TableSync:
+ srcTableIds = append(srcTableIds, job.Src.TableId)
+ default:
+ err := xerror.Panicf(xerror.Normal, "invalid sync type: %s", job.SyncType)
+ j.setError(err)
+ return
+ }
+
+ srcMeta, err := factory.NewThriftMeta(&job.Src, j.ccrJob.factory, srcTableIds)
+ if err != nil {
+ j.setError(err)
+ return
+ }
+
+ destTableIds := make([]int64, 0, len(j.tableRecords))
+ switch job.SyncType {
+ case DBSync:
+ for _, srcTableId := range srcTableIds {
+ if destTableId, ok := j.tableMapping[srcTableId]; ok {
+ destTableIds = append(destTableIds, destTableId)
+ } else {
+ err := xerror.XWrapf(errNotFoundDestMappingTableId, "src table id: %d", srcTableId)
+ j.setError(err)
+ return
+ }
+ }
+ case TableSync:
+ destTableIds = append(destTableIds, job.Dest.TableId)
+ default:
+ err := xerror.Panicf(xerror.Normal, "invalid sync type: %s", job.SyncType)
+ j.setError(err)
+ return
+ }
+
+ destMeta, err := factory.NewThriftMeta(&job.Dest, j.ccrJob.factory, destTableIds)
+ if err != nil {
+ j.setError(err)
+ return
+ }
+
+ j.srcMeta = srcMeta
+ j.destMeta = destMeta
+}
+
// TODO(Drogon): use monad error handle
func (j *IngestBinlogJob) Run() {
+ j.prepareMeta()
+ if err := j.Error(); err != nil {
+ return
+ }
+
j.prepareBackendMap()
if err := j.Error(); err != nil {
return
diff --git a/pkg/ccr/job.go b/pkg/ccr/job.go
index d47f3317..cb35ee68 100644
--- a/pkg/ccr/job.go
+++ b/pkg/ccr/job.go
@@ -1,16 +1,17 @@
package ccr
-// TODO: rewrite by state machine, such as first sync, full/incremental sync
-
import (
"context"
"encoding/json"
"errors"
+ "flag"
"fmt"
"math"
"math/rand"
+ "regexp"
"strings"
"sync"
+ "sync/atomic"
"time"
"github.com/selectdb/ccr_syncer/pkg/ccr/base"
@@ -19,6 +20,7 @@ import (
"github.com/selectdb/ccr_syncer/pkg/storage"
utils "github.com/selectdb/ccr_syncer/pkg/utils"
"github.com/selectdb/ccr_syncer/pkg/xerror"
+ "github.com/selectdb/ccr_syncer/pkg/xmetrics"
festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
tstatus "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status"
@@ -27,13 +29,50 @@ import (
_ "github.com/go-sql-driver/mysql"
"github.com/modern-go/gls"
log "github.com/sirupsen/logrus"
- "go.uber.org/zap"
)
const (
SYNC_DURATION = time.Second * 3
)
+var (
+ featureSchemaChangePartialSync bool
+ featureCleanTableAndPartitions bool
+ featureAtomicRestore bool
+ featureCreateViewDropExists bool
+ featureReplaceNotMatchedWithAlias bool
+ featureFilterShadowIndexesUpsert bool
+ featureReuseRunningBackupRestoreJob bool
+ featureCompressedSnapshot bool
+ featureSkipRollupBinlogs bool
+ featureTxnInsert bool
+)
+
+func init() {
+ flag.BoolVar(&featureSchemaChangePartialSync, "feature_schema_change_partial_sync", true,
+ "use partial sync when working with schema change")
+
+ // The default is false, since cleaning tables would unexpectedly erase views.
+ flag.BoolVar(&featureCleanTableAndPartitions, "feature_clean_table_and_partitions", false,
+ "clean non-restored tables and partitions during fullsync")
+ flag.BoolVar(&featureAtomicRestore, "feature_atomic_restore", true,
+ "replace tables in atomic during fullsync (otherwise the dest table will not be able to read).")
+ flag.BoolVar(&featureCreateViewDropExists, "feature_create_view_drop_exists", true,
+ "drop the exists view if exists, when sync the creating view binlog")
+ flag.BoolVar(&featureReplaceNotMatchedWithAlias, "feature_replace_not_matched_with_alias", true,
+ "replace signature not matched tables with table alias during the full sync")
+ flag.BoolVar(&featureFilterShadowIndexesUpsert, "feature_filter_shadow_indexes_upsert", true,
+ "filter the upsert to the shadow indexes")
+ flag.BoolVar(&featureReuseRunningBackupRestoreJob, "feature_reuse_running_backup_restore_job", true,
+ "reuse the running backup/restore issued by the job self")
+ flag.BoolVar(&featureCompressedSnapshot, "feature_compressed_snapshot", true,
+ "compress the snapshot job info and meta")
+ flag.BoolVar(&featureSkipRollupBinlogs, "feature_skip_rollup_binlogs", false,
+ "skip the rollup related binlogs")
+ flag.BoolVar(&featureTxnInsert, "feature_txn_insert", false,
+ "enable txn insert support")
+}
+
type SyncType int
const (
@@ -71,42 +110,44 @@ func (j JobState) String() string {
}
}
-// TODO: refactor merge Src && Isrc, Dest && IDest
type Job struct {
- SyncType SyncType `json:"sync_type"`
- Name string `json:"name"`
- Src base.Spec `json:"src"`
- ISrc base.Specer `json:"-"`
- srcMeta Metaer `json:"-"`
- Dest base.Spec `json:"dest"`
- IDest base.Specer `json:"-"`
- destMeta Metaer `json:"-"`
- State JobState `json:"state"`
- destSrcTableIdMap map[int64]int64 `json:"-"`
- progress *JobProgress `json:"-"`
- db storage.DB `json:"-"`
- jobFactory *JobFactory `json:"-"`
- rpcFactory rpc.IRpcFactory `json:"-"`
- stop chan struct{} `json:"-"`
- lock sync.Mutex `json:"-"`
+ SyncType SyncType `json:"sync_type"`
+ Name string `json:"name"`
+ Src base.Spec `json:"src"`
+ ISrc base.Specer `json:"-"`
+ srcMeta Metaer `json:"-"`
+ Dest base.Spec `json:"dest"`
+ IDest base.Specer `json:"-"`
+ destMeta Metaer `json:"-"`
+ SkipError bool `json:"skip_error"`
+ State JobState `json:"state"`
+
+ factory *Factory `json:"-"`
+
+ allowTableExists bool `json:"-"` // Only for FirstRun(), don't need to persist.
+ forceFullsync bool `json:"-"` // Force job step fullsync, for test only.
+
+ progress *JobProgress `json:"-"`
+ db storage.DB `json:"-"`
+ jobFactory *JobFactory `json:"-"`
+ rawStatus RawJobStatus `json:"-"`
+
+ stop chan struct{} `json:"-"`
+ isDeleted atomic.Bool `json:"-"`
+
+ concurrencyManager *rpc.ConcurrencyManager `json:"-"`
+
+ lock sync.Mutex `json:"-"`
}
type JobContext struct {
context.Context
- src base.Spec
- dest base.Spec
- db storage.DB
- factory *Factory
-}
-
-func NewJobContext(src, dest base.Spec, db storage.DB, factory *Factory) *JobContext {
- return &JobContext{
- Context: context.Background(),
- src: src,
- dest: dest,
- db: db,
- factory: factory,
- }
+ Src base.Spec
+ Dest base.Spec
+ Db storage.DB
+ SkipError bool
+ AllowTableExists bool
+ Factory *Factory
}
// new job
@@ -116,23 +157,29 @@ func NewJobFromService(name string, ctx context.Context) (*Job, error) {
return nil, xerror.Errorf(xerror.Normal, "invalid context type: %T", ctx)
}
- metaFactory := jobContext.factory.MetaFactory
- iSpecFactory := jobContext.factory.ISpecFactory
- src := jobContext.src
- dest := jobContext.dest
+ factory := jobContext.Factory
+ src := jobContext.Src
+ dest := jobContext.Dest
job := &Job{
- Name: name,
- Src: src,
- ISrc: iSpecFactory.NewSpecer(&src),
- srcMeta: metaFactory.NewMeta(&jobContext.src),
- Dest: dest,
- IDest: iSpecFactory.NewSpecer(&dest),
- destMeta: metaFactory.NewMeta(&jobContext.dest),
- State: JobRunning,
- destSrcTableIdMap: make(map[int64]int64),
- progress: nil,
- db: jobContext.db,
- stop: make(chan struct{}),
+ Name: name,
+ Src: src,
+ ISrc: factory.NewSpecer(&src),
+ srcMeta: factory.NewMeta(&jobContext.Src),
+ Dest: dest,
+ IDest: factory.NewSpecer(&dest),
+ destMeta: factory.NewMeta(&jobContext.Dest),
+ SkipError: jobContext.SkipError,
+ State: JobRunning,
+
+ allowTableExists: jobContext.AllowTableExists,
+ factory: factory,
+ forceFullsync: false,
+
+ progress: nil,
+ db: jobContext.Db,
+ stop: make(chan struct{}),
+
+ concurrencyManager: rpc.NewConcurrencyManager(),
}
if err := job.valid(); err != nil {
@@ -146,7 +193,6 @@ func NewJobFromService(name string, ctx context.Context) (*Job, error) {
}
job.jobFactory = NewJobFactory()
- job.rpcFactory = jobContext.factory.RpcFactory
return job, nil
}
@@ -157,16 +203,18 @@ func NewJobFromJson(jsonData string, db storage.DB, factory *Factory) (*Job, err
if err != nil {
return nil, xerror.Wrapf(err, xerror.Normal, "unmarshal json failed, json: %s", jsonData)
}
- job.ISrc = factory.ISpecFactory.NewSpecer(&job.Src)
- job.IDest = factory.ISpecFactory.NewSpecer(&job.Dest)
- job.srcMeta = factory.MetaFactory.NewMeta(&job.Src)
- job.destMeta = factory.MetaFactory.NewMeta(&job.Dest)
- job.destSrcTableIdMap = make(map[int64]int64)
+
+ // recover all fields that are not serialized to json
+ job.factory = factory
+ job.ISrc = factory.NewSpecer(&job.Src)
+ job.IDest = factory.NewSpecer(&job.Dest)
+ job.srcMeta = factory.NewMeta(&job.Src)
+ job.destMeta = factory.NewMeta(&job.Dest)
job.progress = nil
job.db = db
job.stop = make(chan struct{})
job.jobFactory = NewJobFactory()
- job.rpcFactory = factory.RpcFactory
+ job.concurrencyManager = rpc.NewConcurrencyManager()
return &job, nil
}
@@ -200,13 +248,11 @@ func (j *Job) valid() error {
}
func (j *Job) RecoverDatabaseSync() error {
- // TODO(Drogon): impl
return nil
}
// database old data sync
func (j *Job) DatabaseOldDataSync() error {
- // TODO(Drogon): impl
// Step 1: drop all tables
err := j.IDest.ClearDB()
if err != nil {
@@ -220,16 +266,16 @@ func (j *Job) DatabaseOldDataSync() error {
// database sync
func (j *Job) DatabaseSync() error {
- // TODO(Drogon): impl
return nil
}
func (j *Job) genExtraInfo() (*base.ExtraInfo, error) {
meta := j.srcMeta
- masterToken, err := meta.GetMasterToken(j.rpcFactory)
+ masterToken, err := meta.GetMasterToken(j.factory)
if err != nil {
return nil, err
}
+ log.Infof("gen extra info with master token %s", masterToken)
backends, err := meta.GetBackends()
if err != nil {
@@ -240,7 +286,7 @@ func (j *Job) genExtraInfo() (*base.ExtraInfo, error) {
beNetworkMap := make(map[int64]base.NetworkAddr)
for _, backend := range backends {
- log.Infof("backend: %v", backend)
+ log.Infof("gen extra info with backend: %v", backend)
addr := base.NetworkAddr{
Ip: backend.Host,
Port: backend.HttpPort,
@@ -255,166 +301,297 @@ func (j *Job) genExtraInfo() (*base.ExtraInfo, error) {
}
func (j *Job) isIncrementalSync() bool {
- return j.progress.SyncState == DBIncrementalSync || j.progress.SyncState == TableIncrementalSync
+ switch j.progress.SyncState {
+ case TableIncrementalSync, DBIncrementalSync, DBTablesIncrementalSync:
+ return true
+ default:
+ return false
+ }
}
-func (j *Job) fullSync() error {
+func (j *Job) isTableSyncWithAlias() bool {
+ return j.SyncType == TableSync && j.Src.Table != j.Dest.Table
+}
+
+func (j *Job) isTableDropped(tableId int64) (bool, error) {
+ // Keep compatible with the old version, which doesn't have the table id in partial sync data.
+ if tableId == 0 {
+ return false, nil
+ }
+
+ var tableIds = []int64{tableId}
+ srcMeta, err := j.factory.NewThriftMeta(&j.Src, j.factory, tableIds)
+ if err != nil {
+ return false, err
+ }
+
+ return srcMeta.IsTableDropped(tableId), nil
+}
+
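+// addExtraInfo unmarshals the snapshot job info, attaches the upstream master token
+// and backend addresses as "extra_info", and returns the re-marshaled bytes.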
+func (j *Job) addExtraInfo(jobInfo []byte) ([]byte, error) {
+ var jobInfoMap map[string]interface{}
+ err := json.Unmarshal(jobInfo, &jobInfoMap)
+ if err != nil {
+ return nil, xerror.Wrapf(err, xerror.Normal, "unmarshal jobInfo failed, jobInfo: %s", string(jobInfo))
+ }
+
+ extraInfo, err := j.genExtraInfo()
+ if err != nil {
+ return nil, err
+ }
+ log.Debugf("extraInfo: %v", extraInfo)
+ jobInfoMap["extra_info"] = extraInfo
+
+ jobInfoBytes, err := json.Marshal(jobInfoMap)
+ if err != nil {
+ return nil, xerror.Errorf(xerror.Normal, "marshal jobInfo failed, jobInfo: %v", jobInfoMap)
+ }
+
+ return jobInfoBytes, nil
+}
+
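+// handlePartialSyncTableNotFound handles a partial sync whose target table no longer
+// exists upstream: if the table was dropped the partial sync is skipped; otherwise the
+// table is assumed to have been renamed and a new partial snapshot is started (or an
+// error is returned in table sync mode).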
+func (j *Job) handlePartialSyncTableNotFound() error {
+ tableId := j.progress.PartialSyncData.TableId
+ table := j.progress.PartialSyncData.Table
+
+ if dropped, err := j.isTableDropped(tableId); err != nil {
+ return err
+ } else if dropped {
+ // skip this partial sync because table has been dropped
+ log.Warnf("skip this partial sync because table %s has been dropped, table id: %d", table, tableId)
+ nextCommitSeq := j.progress.CommitSeq
+ if j.SyncType == DBSync {
+ j.progress.NextWithPersist(nextCommitSeq, DBIncrementalSync, Done, "")
+ } else {
+ j.progress.NextWithPersist(nextCommitSeq, TableIncrementalSync, Done, "")
+ }
+ return nil
+ } else if newTableName, err := j.srcMeta.GetTableNameById(tableId); err != nil {
+ return err
+ } else if j.SyncType == DBSync {
+ // The table might have been renamed, so we need to update the table name.
+ log.Warnf("force new partial snapshot, since table %d has been renamed from %s to %s", tableId, table, newTableName)
+ replace := true // replace the old data to avoid blocking reading
+ return j.newPartialSnapshot(tableId, newTableName, nil, replace)
+ } else {
+ return xerror.Errorf(xerror.Normal, "table sync but table has renamed from %s to %s, table id %d",
+ table, newTableName, tableId)
+ }
+}
+
+// Like fullSync, but only back up and restore a subset of the partitions of a table.
+func (j *Job) partialSync() error {
type inMemoryData struct {
SnapshotName string `json:"snapshot_name"`
SnapshotResp *festruct.TGetSnapshotResult_ `json:"snapshot_resp"`
TableCommitSeqMap map[int64]int64 `json:"table_commit_seq_map"`
+ TableNameMapping map[int64]string `json:"table_name_mapping"`
+ RestoreLabel string `json:"restore_label"`
+ }
+
+ if j.progress.PartialSyncData == nil {
+ return xerror.Errorf(xerror.Normal, "run partial sync but data is nil")
}
- // TODO: snapshot machine, not need create snapshot each time
- // TODO(Drogon): check last snapshot commitSeq > first commitSeq, maybe we can reuse this snapshot
+ tableId := j.progress.PartialSyncData.TableId
+ table := j.progress.PartialSyncData.Table
+ partitions := j.progress.PartialSyncData.Partitions
switch j.progress.SubSyncState {
case Done:
- if err := j.newSnapshot(j.progress.CommitSeq); err != nil {
+ log.Infof("partial sync status: done")
+ withAlias := len(j.progress.TableAliases) > 0
+ if err := j.newPartialSnapshot(tableId, table, partitions, withAlias); err != nil {
return err
}
case BeginCreateSnapshot:
// Step 1: Create snapshot
- log.Infof("fullsync status: create snapshot")
+ prefix := NewPartialSnapshotLabelPrefix(j.Name, j.progress.SyncId)
+ log.Infof("partial sync status: create snapshot with prefix %s", prefix)
- backupTableList := make([]string, 0)
- switch j.SyncType {
- case DBSync:
- tables, err := j.srcMeta.GetTables()
+ if featureReuseRunningBackupRestoreJob {
+ snapshotName, err := j.ISrc.GetValidBackupJob(prefix)
if err != nil {
return err
}
- for _, table := range tables {
- backupTableList = append(backupTableList, table.Name)
+ if snapshotName != "" {
+ log.Infof("partial sync status: find a valid backup job %s", snapshotName)
+ j.progress.NextSubVolatile(WaitBackupDone, snapshotName)
+ return nil
}
- case TableSync:
- backupTableList = append(backupTableList, j.Src.Table)
- default:
- return xerror.Errorf(xerror.Normal, "invalid sync type %s", j.SyncType)
}
- snapshotName, err := j.ISrc.CreateSnapshotAndWaitForDone(backupTableList)
+
+ snapshotName := NewLabelWithTs(prefix)
+ err := j.ISrc.CreatePartialSnapshot(snapshotName, table, partitions)
+ if err != nil && err == base.ErrBackupPartitionNotFound {
+ log.Warnf("partial sync status: partition not found in the upstream, step to table partial sync")
+ replace := true // replace the old data to avoid blocking reading
+ return j.newPartialSnapshot(tableId, table, nil, replace)
+ } else if err != nil && err == base.ErrBackupTableNotFound {
+ return j.handlePartialSyncTableNotFound()
+ } else if err != nil {
+ return err
+ }
+
+ j.progress.NextSubVolatile(WaitBackupDone, snapshotName)
+ return nil
+
+ case WaitBackupDone:
+ // Step 2: Wait backup job done
+ snapshotName := j.progress.InMemoryData.(string)
+ backupFinished, err := j.ISrc.CheckBackupFinished(snapshotName)
if err != nil {
+ j.progress.NextSubVolatile(BeginCreateSnapshot, snapshotName)
return err
}
+ if !backupFinished {
+ log.Infof("partial sync status: backup job %s is running", snapshotName)
+ return nil
+ }
+
j.progress.NextSubCheckpoint(GetSnapshotInfo, snapshotName)
case GetSnapshotInfo:
- // Step 2: Get snapshot info
- log.Infof("fullsync status: get snapshot info")
+ // Step 3: Get snapshot info
+ log.Infof("partial sync status: get snapshot info")
snapshotName := j.progress.PersistData
src := &j.Src
- srcRpc, err := j.rpcFactory.NewFeRpc(src)
+ srcRpc, err := j.factory.NewFeRpc(src)
if err != nil {
return err
}
- log.Debugf("begin get snapshot %s", snapshotName)
- snapshotResp, err := srcRpc.GetSnapshot(src, snapshotName)
+ log.Debugf("partial sync begin get snapshot %s", snapshotName)
+ compress := false // partial snapshot no need to compress
+ snapshotResp, err := srcRpc.GetSnapshot(src, snapshotName, compress)
if err != nil {
return err
}
- if snapshotResp.Status.GetStatusCode() != tstatus.TStatusCode_OK {
- log.Errorf("get snapshot failed, status: %v", snapshotResp.Status)
+ if snapshotResp.Status.GetStatusCode() == tstatus.TStatusCode_SNAPSHOT_NOT_EXIST ||
+ snapshotResp.Status.GetStatusCode() == tstatus.TStatusCode_SNAPSHOT_EXPIRED {
+ log.Warnf("get snapshot %s: %s (%s), retry with new partial sync", snapshotName,
+ utils.FirstOr(snapshotResp.Status.GetErrorMsgs(), "unknown"),
+ snapshotResp.Status.GetStatusCode())
+ replace := len(j.progress.TableAliases) > 0
+ return j.newPartialSnapshot(tableId, table, partitions, replace)
+ } else if snapshotResp.Status.GetStatusCode() != tstatus.TStatusCode_OK {
+ err = xerror.Errorf(xerror.FE, "get snapshot failed, status: %v", snapshotResp.Status)
+ return err
}
- log.Debugf("job: %s", string(snapshotResp.GetJobInfo()))
if !snapshotResp.IsSetJobInfo() {
return xerror.New(xerror.Normal, "jobInfo is not set")
}
- tableCommitSeqMap, err := ExtractTableCommitSeqMap(snapshotResp.GetJobInfo())
+ log.Tracef("job: %.128s", snapshotResp.GetJobInfo())
+
+ backupJobInfo, err := NewBackupJobInfoFromJson(snapshotResp.GetJobInfo())
if err != nil {
return err
}
- if j.SyncType == TableSync {
- if _, ok := tableCommitSeqMap[j.Src.TableId]; !ok {
- return xerror.Errorf(xerror.Normal, "tableid %d, commit seq not found", j.Src.TableId)
+ tableCommitSeqMap := backupJobInfo.TableCommitSeqMap
+ tableNameMapping := backupJobInfo.TableNameMapping()
+ log.Debugf("table commit seq map: %v, table name mapping: %v", tableCommitSeqMap, tableNameMapping)
+ if backupObject, ok := backupJobInfo.BackupObjects[table]; !ok {
+ return xerror.Errorf(xerror.Normal, "table %s not found in backup objects", table)
+ } else if backupObject.Id != tableId {
+ log.Warnf("partial sync table %s id not match, force full sync. table id %d, backup object id %d",
+ table, tableId, backupObject.Id)
+ if j.SyncType == TableSync {
+ log.Infof("reset src table id from %d to %d, table %s", j.Src.TableId, backupObject.Id, table)
+ j.Src.TableId = backupObject.Id
}
+ return j.newSnapshot(j.progress.CommitSeq)
+ } else if _, ok := tableCommitSeqMap[backupObject.Id]; !ok {
+ return xerror.Errorf(xerror.Normal, "commit seq not found, table id %d, table name: %s", backupObject.Id, table)
}
inMemoryData := &inMemoryData{
SnapshotName: snapshotName,
SnapshotResp: snapshotResp,
TableCommitSeqMap: tableCommitSeqMap,
+ TableNameMapping: tableNameMapping,
}
j.progress.NextSubVolatile(AddExtraInfo, inMemoryData)
case AddExtraInfo:
- // Step 3: Add extra info
- log.Infof("fullsync status: add extra info")
+ // Step 4: Add extra info
+ log.Infof("partial sync status: add extra info")
inMemoryData := j.progress.InMemoryData.(*inMemoryData)
snapshotResp := inMemoryData.SnapshotResp
jobInfo := snapshotResp.GetJobInfo()
- tableCommitSeqMap := inMemoryData.TableCommitSeqMap
- var jobInfoMap map[string]interface{}
- err := json.Unmarshal(jobInfo, &jobInfoMap)
- if err != nil {
- return xerror.Wrapf(err, xerror.Normal, "unmarshal jobInfo failed, jobInfo: %s", string(jobInfo))
- }
- log.Debugf("jobInfo: %v", jobInfoMap)
+ log.Infof("partial sync snapshot response meta size: %d, job info size: %d, expired at: %d",
+ len(snapshotResp.Meta), len(snapshotResp.JobInfo), snapshotResp.GetExpiredAt())
- extraInfo, err := j.genExtraInfo()
+ jobInfoBytes, err := j.addExtraInfo(jobInfo)
if err != nil {
return err
}
- log.Debugf("extraInfo: %v", extraInfo)
- jobInfoMap["extra_info"] = extraInfo
- jobInfoBytes, err := json.Marshal(jobInfoMap)
- if err != nil {
- return xerror.Errorf(xerror.Normal, "marshal jobInfo failed, jobInfo: %v", jobInfoMap)
- }
- log.Debugf("jobInfoBytes: %s", string(jobInfoBytes))
+ log.Debugf("partial sync job info size: %d, bytes: %.128s", len(jobInfoBytes), string(jobInfoBytes))
snapshotResp.SetJobInfo(jobInfoBytes)
- var commitSeq int64 = math.MaxInt64
- switch j.SyncType {
- case DBSync:
- for _, seq := range tableCommitSeqMap {
- commitSeq = utils.Min(commitSeq, seq)
- }
- j.progress.TableCommitSeqMap = tableCommitSeqMap // persist in CommitNext
- case TableSync:
- commitSeq = tableCommitSeqMap[j.Src.TableId]
- }
- j.progress.CommitNextSubWithPersist(commitSeq, RestoreSnapshot, inMemoryData)
+ j.progress.NextSubVolatile(RestoreSnapshot, inMemoryData)
case RestoreSnapshot:
- // Step 4: Restore snapshot
- log.Infof("fullsync status: restore snapshot")
+ // Step 5: Restore snapshot
+ log.Infof("partial sync status: restore snapshot")
if j.progress.InMemoryData == nil {
persistData := j.progress.PersistData
inMemoryData := &inMemoryData{}
if err := json.Unmarshal([]byte(persistData), inMemoryData); err != nil {
- // TODO: return to snapshot
return xerror.Errorf(xerror.Normal, "unmarshal persistData failed, persistData: %s", persistData)
}
j.progress.InMemoryData = inMemoryData
}
- // Step 4.1: start a new fullsync && persist
+ // Step 5.1: try reuse the exists restore job.
inMemoryData := j.progress.InMemoryData.(*inMemoryData)
snapshotName := inMemoryData.SnapshotName
+ if featureReuseRunningBackupRestoreJob {
+ name, err := j.IDest.GetValidRestoreJob(snapshotName)
+ if err != nil {
+ return nil
+ }
+ if name != "" {
+ log.Infof("partial sync status: find a valid restore job %s", name)
+ inMemoryData.RestoreLabel = name
+ j.progress.NextSubVolatile(WaitRestoreDone, inMemoryData)
+ break
+ }
+ }
+
+ // Step 5.2: start a new fullsync & restore snapshot to dest
+ restoreSnapshotName := NewRestoreLabel(snapshotName)
snapshotResp := inMemoryData.SnapshotResp
- // Step 4.2: restore snapshot to dest
dest := &j.Dest
- destRpc, err := j.rpcFactory.NewFeRpc(dest)
+ destRpc, err := j.factory.NewFeRpc(dest)
if err != nil {
return err
}
- log.Debugf("begin restore snapshot %s", snapshotName)
+ log.Debugf("partial sync begin restore snapshot %s to %s", snapshotName, restoreSnapshotName)
var tableRefs []*festruct.TTableRef
- if j.Src.IsSameHostDB(&j.Dest) {
- log.Debugf("same host db, table: %s, dest table: %s", j.Src.Table, j.Dest.Table)
+
+ // ATTN: The table name paired with the alias comes from the source cluster.
+ if aliasName, ok := j.progress.TableAliases[table]; ok {
+ log.Infof("partial sync with table alias, table: %s, alias: %s", table, aliasName)
+ tableRefs = make([]*festruct.TTableRef, 0)
+ tableRef := &festruct.TTableRef{
+ Table: &table,
+ AliasName: &aliasName,
+ }
+ tableRefs = append(tableRefs, tableRef)
+ } else if j.isTableSyncWithAlias() {
+ log.Infof("table sync snapshot not same name, table: %s, dest table: %s", j.Src.Table, j.Dest.Table)
tableRefs = make([]*festruct.TTableRef, 0)
tableRef := &festruct.TTableRef{
Table: &j.Src.Table,
@@ -422,618 +599,2175 @@ func (j *Job) fullSync() error {
}
tableRefs = append(tableRefs, tableRef)
}
- restoreResp, err := destRpc.RestoreSnapshot(dest, tableRefs, snapshotName, snapshotResp)
+
+ restoreReq := rpc.RestoreSnapshotRequest{
+ TableRefs: tableRefs,
+ SnapshotName: restoreSnapshotName,
+ SnapshotResult: snapshotResp,
+
+ // DO NOT drop exists tables and partitions
+ CleanPartitions: false,
+ CleanTables: false,
+ AtomicRestore: false,
+ Compress: false,
+ }
+ restoreResp, err := destRpc.RestoreSnapshot(dest, &restoreReq)
if err != nil {
return err
}
- log.Infof("resp: %v", restoreResp)
+ if restoreResp.Status.GetStatusCode() != tstatus.TStatusCode_OK {
+ return xerror.Errorf(xerror.Normal, "restore snapshot failed, status: %v", restoreResp.Status)
+ }
+ log.Infof("partial sync restore snapshot resp: %v", restoreResp)
+ inMemoryData.RestoreLabel = restoreSnapshotName
+
+ j.progress.NextSubVolatile(WaitRestoreDone, inMemoryData)
+ return nil
+
+ case WaitRestoreDone:
+ // Step 6: Wait restore job done
+ inMemoryData := j.progress.InMemoryData.(*inMemoryData)
+ restoreSnapshotName := inMemoryData.RestoreLabel
+ snapshotResp := inMemoryData.SnapshotResp
- // TODO: impl wait for done, use show restore
- restoreFinished, err := j.IDest.CheckRestoreFinished(snapshotName)
+ if snapshotResp.GetExpiredAt() > 0 && time.Now().UnixMilli() > snapshotResp.GetExpiredAt() {
+ log.Infof("partial sync snapshot %s is expired, cancel and retry with new partial sync", restoreSnapshotName)
+ if err := j.IDest.CancelRestoreIfExists(restoreSnapshotName); err != nil {
+ return err
+ }
+ replace := len(j.progress.TableAliases) > 0
+ return j.newPartialSnapshot(tableId, table, partitions, replace)
+ }
+
+ restoreFinished, err := j.IDest.CheckRestoreFinished(restoreSnapshotName)
if err != nil {
+ j.progress.NextSubVolatile(RestoreSnapshot, inMemoryData)
return err
}
+
if !restoreFinished {
- err = xerror.Errorf(xerror.Normal, "check restore state timeout, max try times: %d", base.MAX_CHECK_RETRY_TIMES)
- return err
+ log.Infof("partial sync status: restore job %s is running", restoreSnapshotName)
+ return nil
}
- j.progress.NextSubCheckpoint(PersistRestoreInfo, snapshotName)
+
+ // save the entire commit seq map, this value will be used in PersistRestoreInfo.
+ j.progress.TableCommitSeqMap = utils.MergeMap(
+ j.progress.TableCommitSeqMap, inMemoryData.TableCommitSeqMap)
+ j.progress.TableNameMapping = utils.MergeMap(
+ j.progress.TableNameMapping, inMemoryData.TableNameMapping)
+ j.progress.NextSubCheckpoint(PersistRestoreInfo, restoreSnapshotName)
case PersistRestoreInfo:
- // Step 5: Update job progress && dest table id
+ // Step 7: Update job progress && dest table id
// update job info, only for dest table id
- log.Infof("fullsync status: persist restore info")
+ var targetName = table
+ if j.isTableSyncWithAlias() {
+ targetName = j.Dest.Table
+ }
+ if alias, ok := j.progress.TableAliases[table]; ok {
+ // check table exists to ensure the idempotent
+ if exist, err := j.IDest.CheckTableExistsByName(alias); err != nil {
+ return err
+ } else if exist {
+ if exists, err := j.IDest.CheckTableExistsByName(targetName); err != nil {
+ return err
+ } else if exists {
+ log.Infof("partial sync swap table with alias, table: %s, alias: %s", targetName, alias)
+ swap := false // drop the old table
+ if err := j.IDest.ReplaceTable(alias, targetName, swap); err != nil {
+ return err
+ }
+ } else {
+ log.Infof("partial sync rename table alias %s to %s", alias, targetName)
+ if err := j.IDest.RenameTableWithName(alias, targetName); err != nil {
+ return err
+ }
+ }
+ // Since the meta of dest table has been changed, refresh it.
+ j.destMeta.ClearTablesCache()
+ } else {
+ log.Infof("partial sync the table alias has been swapped, table: %s, alias: %s", targetName, alias)
+ }
- // TODO: retry && mark it for not start a new full sync
+ // Save the replace result
+ j.progress.TableAliases = nil
+ j.progress.NextSubCheckpoint(PersistRestoreInfo, j.progress.PersistData)
+ }
+
+ log.Infof("partial sync status: persist restore info")
+ destTable, err := j.destMeta.UpdateTable(targetName, 0)
+ if err != nil {
+ return err
+ }
switch j.SyncType {
case DBSync:
+ j.progress.TableMapping[tableId] = destTable.Id
j.progress.NextWithPersist(j.progress.CommitSeq, DBTablesIncrementalSync, Done, "")
case TableSync:
- if destTable, err := j.destMeta.UpdateTable(j.Dest.Table, 0); err != nil {
- return err
- } else {
- j.Dest.TableId = destTable.Id
- }
-
- // TODO: reload check job table id
- if err := j.persistJob(); err != nil {
- return err
+ commitSeq, ok := j.progress.TableCommitSeqMap[j.Src.TableId]
+ if !ok {
+ return xerror.Errorf(xerror.Normal, "table id %d, commit seq not found", j.Src.TableId)
}
-
+ j.Dest.TableId = destTable.Id
+ j.progress.TableMapping = nil
j.progress.TableCommitSeqMap = nil
- j.progress.NextWithPersist(j.progress.CommitSeq, TableIncrementalSync, Done, "")
+ j.progress.NextWithPersist(commitSeq, TableIncrementalSync, Done, "")
default:
return xerror.Errorf(xerror.Normal, "invalid sync type %d", j.SyncType)
}
return nil
+
default:
return xerror.Errorf(xerror.Normal, "invalid job sub sync state %d", j.progress.SubSyncState)
}
- return j.fullSync()
-}
-
-func (j *Job) persistJob() error {
- data, err := json.Marshal(j)
- if err != nil {
- return xerror.Errorf(xerror.Normal, "marshal job failed, job: %v", j)
- }
-
- if err := j.db.UpdateJob(j.Name, string(data)); err != nil {
- return err
- }
-
- return nil
+ return j.partialSync()
}
-// FIXME: label will conflict when commitSeq equal
-func (j *Job) newLabel(commitSeq int64) string {
- src := &j.Src
- dest := &j.Dest
- randNum := rand.Intn(65536) // hex 4 chars
- if j.SyncType == DBSync {
- // label "ccrj-rand:${sync_type}:${src_db_id}:${dest_db_id}:${commit_seq}"
- return fmt.Sprintf("ccrj-%x:%s:%d:%d:%d", randNum, j.SyncType, src.DbId, dest.DbId, commitSeq)
- } else {
- // TableSync
- // label "ccrj-rand:${sync_type}:${src_db_id}_${src_table_id}:${dest_db_id}_${dest_table_id}:${commit_seq}"
- return fmt.Sprintf("ccrj-%x:%s:%d_%d:%d_%d:%d", randNum, j.SyncType, src.DbId, src.TableId, dest.DbId, dest.TableId, commitSeq)
+func (j *Job) fullSync() error {
+ type inMemoryData struct {
+ SnapshotName string `json:"snapshot_name"`
+ SnapshotResp *festruct.TGetSnapshotResult_ `json:"snapshot_resp"`
+ TableCommitSeqMap map[int64]int64 `json:"table_commit_seq_map"`
+ TableNameMapping map[int64]string `json:"table_name_mapping"`
+ Views []string `json:"views"`
+ RestoreLabel string `json:"restore_label"`
}
-}
-// only called by DBSync, TableSync tableId is in Src/Dest Spec
-// TODO: [Performance] improve by cache
-func (j *Job) getDestTableIdBySrc(srcTableId int64) (int64, error) {
- if destTableId, ok := j.destSrcTableIdMap[srcTableId]; ok {
- return destTableId, nil
- }
+ switch j.progress.SubSyncState {
+ case Done:
+ log.Infof("fullsync status: done")
+ if err := j.newSnapshot(j.progress.CommitSeq); err != nil {
+ return err
+ }
- srcTableName, err := j.srcMeta.GetTableNameById(srcTableId)
- if err != nil {
- return 0, err
- }
+ case BeginCreateSnapshot:
+ // Step 1: Create snapshot
+ prefix := NewSnapshotLabelPrefix(j.Name, j.progress.SyncId)
+ log.Infof("fullsync status: create snapshot with prefix %s", prefix)
- if destTableId, err := j.destMeta.GetTableId(srcTableName); err != nil {
- return 0, err
- } else {
- j.destSrcTableIdMap[srcTableId] = destTableId
- return destTableId, nil
- }
-}
+ if featureReuseRunningBackupRestoreJob {
+ snapshotName, err := j.ISrc.GetValidBackupJob(prefix)
+ if err != nil {
+ return err
+ }
+ if snapshotName != "" {
+ log.Infof("fullsync status: find a valid backup job %s", snapshotName)
+ j.progress.NextSubVolatile(WaitBackupDone, snapshotName)
+ return nil
+ }
+ }
-func (j *Job) getDbSyncTableRecords(upsert *record.Upsert) ([]*record.TableRecord, error) {
- commitSeq := upsert.CommitSeq
- tableCommitSeqMap := j.progress.TableCommitSeqMap
- tableRecords := make([]*record.TableRecord, 0, len(upsert.TableRecords))
+ backupTableList := make([]string, 0)
+ switch j.SyncType {
+ case DBSync:
+ tables, err := j.srcMeta.GetTables()
+ if err != nil {
+ return err
+ }
+ if len(tables) == 0 {
+ log.Warnf("full sync but source db is empty! retry later")
+ return nil
+ }
+ case TableSync:
+ backupTableList = append(backupTableList, j.Src.Table)
+ default:
+ return xerror.Errorf(xerror.Normal, "invalid sync type %s", j.SyncType)
+ }
- for tableId, tableRecord := range upsert.TableRecords {
- // DBIncrementalSync
- if tableCommitSeqMap == nil {
- tableRecords = append(tableRecords, tableRecord)
- continue
+ snapshotName := NewLabelWithTs(prefix)
+ if err := j.ISrc.CreateSnapshot(snapshotName, backupTableList); err != nil {
+ return err
}
+ j.progress.NextSubVolatile(WaitBackupDone, snapshotName)
+ return nil
- if tableCommitSeq, ok := tableCommitSeqMap[tableId]; ok {
- if commitSeq > tableCommitSeq {
- tableRecords = append(tableRecords, tableRecord)
- }
- } else {
- // TODO: check
+ case WaitBackupDone:
+ // Step 2: Wait backup job done
+ snapshotName := j.progress.InMemoryData.(string)
+ backupFinished, err := j.ISrc.CheckBackupFinished(snapshotName)
+ if err != nil {
+ j.progress.NextSubVolatile(BeginCreateSnapshot, snapshotName)
+ return err
+ }
+ if !backupFinished {
+ log.Infof("fullsync status: backup job %s is running", snapshotName)
+ return nil
}
- }
- return tableRecords, nil
-}
+ j.progress.NextSubCheckpoint(GetSnapshotInfo, snapshotName)
-func (j *Job) getReleatedTableRecords(upsert *record.Upsert) ([]*record.TableRecord, error) {
- var tableRecords []*record.TableRecord //, 0, len(upsert.TableRecords))
+ case GetSnapshotInfo:
+ // Step 3: Get snapshot info
+ log.Infof("fullsync status: get snapshot info")
- switch j.SyncType {
- case DBSync:
- records, err := j.getDbSyncTableRecords(upsert)
+ snapshotName := j.progress.PersistData
+ src := &j.Src
+ srcRpc, err := j.factory.NewFeRpc(src)
if err != nil {
- return nil, err
+ return err
}
- if len(records) == 0 {
- return nil, nil
+ log.Debugf("fullsync begin get snapshot %s", snapshotName)
+ compress := false
+ snapshotResp, err := srcRpc.GetSnapshot(src, snapshotName, compress)
+ if err != nil {
+ return err
}
- tableRecords = records
- case TableSync:
- tableRecord, ok := upsert.TableRecords[j.Src.TableId]
- if !ok {
- return nil, xerror.Errorf(xerror.Normal, "table record not found, table: %s", j.Src.Table)
+
+ if snapshotResp.Status.GetStatusCode() == tstatus.TStatusCode_SNAPSHOT_NOT_EXIST ||
+ snapshotResp.Status.GetStatusCode() == tstatus.TStatusCode_SNAPSHOT_EXPIRED {
+ log.Warnf("get snapshot %s: %s (%s), retry with new full sync", snapshotName,
+ utils.FirstOr(snapshotResp.Status.GetErrorMsgs(), "unknown"),
+ snapshotResp.Status.GetStatusCode())
+ return j.newSnapshot(j.progress.CommitSeq)
+ } else if snapshotResp.Status.GetStatusCode() != tstatus.TStatusCode_OK {
+ err = xerror.Errorf(xerror.FE, "get snapshot failed, status: %v", snapshotResp.Status)
+ return err
}
+
+ if !snapshotResp.IsSetJobInfo() {
+ return xerror.New(xerror.Normal, "jobInfo is not set")
+ }
+
+ if snapshotResp.GetCompressed() {
+ if bytes, err := utils.GZIPDecompress(snapshotResp.GetJobInfo()); err != nil {
+ return xerror.Wrap(err, xerror.Normal, "decompress snapshot job info failed")
+ } else {
+ snapshotResp.SetJobInfo(bytes)
+ }
+ if bytes, err := utils.GZIPDecompress(snapshotResp.GetMeta()); err != nil {
+ return xerror.Wrap(err, xerror.Normal, "decompress snapshot meta failed")
+ } else {
+ snapshotResp.SetMeta(bytes)
+ }
+ }
+
+ log.Tracef("fullsync snapshot job: %.128s", snapshotResp.GetJobInfo())
+ backupJobInfo, err := NewBackupJobInfoFromJson(snapshotResp.GetJobInfo())
+ if err != nil {
+ return err
+ }
+
+ tableCommitSeqMap := backupJobInfo.TableCommitSeqMap
+ tableNameMapping := backupJobInfo.TableNameMapping()
+ views := backupJobInfo.Views()
+
+ if j.SyncType == TableSync {
+ if backupObject, ok := backupJobInfo.BackupObjects[j.Src.Table]; !ok {
+ return xerror.Errorf(xerror.Normal, "table %s not found in backup objects", j.Src.Table)
+ } else if backupObject.Id != j.Src.TableId {
+ // The table might have been replaced.
+ log.Warnf("full sync table %s id does not match, force full sync and reset table id from %d to %d",
+ j.Src.Table, j.Src.TableId, backupObject.Id)
+ j.Src.TableId = backupObject.Id
+ return j.newSnapshot(j.progress.CommitSeq)
+ } else if _, ok := tableCommitSeqMap[j.Src.TableId]; !ok {
+ return xerror.Errorf(xerror.Normal, "table id %d, commit seq not found", j.Src.TableId)
+ }
+ } else {
+ // save the view ids in the table commit seq map, to build the view mapping later.
+ for _, view := range backupJobInfo.NewBackupObjects.Views {
+ tableNameMapping[view.Id] = view.Name
+ tableCommitSeqMap[view.Id] = snapshotResp.GetCommitSeq() // zero if not exists
+ }
+ }
+
+ inMemoryData := &inMemoryData{
+ SnapshotName: snapshotName,
+ SnapshotResp: snapshotResp,
+ TableCommitSeqMap: tableCommitSeqMap,
+ TableNameMapping: tableNameMapping,
+ Views: views,
+ }
+ j.progress.NextSubVolatile(AddExtraInfo, inMemoryData)
+
+ case AddExtraInfo:
+ // Step 4: Add extra info
+ log.Infof("fullsync status: add extra info")
+
+ inMemoryData := j.progress.InMemoryData.(*inMemoryData)
+ snapshotResp := inMemoryData.SnapshotResp
+ jobInfo := snapshotResp.GetJobInfo()
+
+ log.Infof("snapshot response meta size: %d, job info size: %d, expired at: %d, commit seq: %d",
+ len(snapshotResp.Meta), len(snapshotResp.JobInfo), snapshotResp.GetExpiredAt(), snapshotResp.GetCommitSeq())
+
+ jobInfoBytes, err := j.addExtraInfo(jobInfo)
+ if err != nil {
+ return err
+ }
+ log.Debugf("job info size: %d, bytes: %.128s", len(jobInfoBytes), string(jobInfoBytes))
+ snapshotResp.SetJobInfo(jobInfoBytes)
+
+ j.progress.NextSubVolatile(RestoreSnapshot, inMemoryData)
+
+ case RestoreSnapshot:
+ // Step 5: Restore snapshot
+ log.Infof("fullsync status: restore snapshot")
+
+ if j.progress.InMemoryData == nil {
+ persistData := j.progress.PersistData
+ inMemoryData := &inMemoryData{}
+ if err := json.Unmarshal([]byte(persistData), inMemoryData); err != nil {
+ return xerror.Errorf(xerror.Normal, "unmarshal persistData failed, persistData: %s", persistData)
+ }
+ j.progress.InMemoryData = inMemoryData
+ }
+
+ // Step 5.1: cancel the running restore job which by the former process, if exists
+ inMemoryData := j.progress.InMemoryData.(*inMemoryData)
+ snapshotName := inMemoryData.SnapshotName
+ if featureReuseRunningBackupRestoreJob {
+ restoreSnapshotName, err := j.IDest.GetValidRestoreJob(snapshotName)
+ if err != nil {
+ return nil
+ }
+ if restoreSnapshotName != "" {
+ log.Infof("fullsync status: find a valid restore job %s", restoreSnapshotName)
+ inMemoryData.RestoreLabel = restoreSnapshotName
+ j.progress.NextSubVolatile(WaitRestoreDone, inMemoryData)
+ break
+ }
+ }
+
+ // Step 5.2: start a new fullsync & restore snapshot to dest
+ restoreSnapshotName := NewRestoreLabel(snapshotName)
+ snapshotResp := inMemoryData.SnapshotResp
+ tableNameMapping := inMemoryData.TableNameMapping
+
+ dest := &j.Dest
+ destRpc, err := j.factory.NewFeRpc(dest)
+ if err != nil {
+ return err
+ }
+ log.Debugf("begin restore snapshot %s to %s", snapshotName, restoreSnapshotName)
+
+ var tableRefs []*festruct.TTableRef
+ if j.isTableSyncWithAlias() {
+ log.Debugf("table sync snapshot not same name, table: %s, dest table: %s", j.Src.Table, j.Dest.Table)
+ tableRefs = make([]*festruct.TTableRef, 0)
+ tableRef := &festruct.TTableRef{
+ Table: &j.Src.Table,
+ AliasName: &j.Dest.Table,
+ }
+ tableRefs = append(tableRefs, tableRef)
+ }
+ if len(j.progress.TableAliases) > 0 {
+ tableRefs = make([]*festruct.TTableRef, 0)
+ viewMap := make(map[string]interface{})
+ for _, viewName := range inMemoryData.Views {
+ log.Debugf("fullsync alias with view ref %s", viewName)
+ viewMap[viewName] = nil
+ tableRef := &festruct.TTableRef{Table: utils.ThriftValueWrapper(viewName)}
+ tableRefs = append(tableRefs, tableRef)
+ }
+ for _, tableName := range tableNameMapping {
+ if alias, ok := j.progress.TableAliases[tableName]; ok {
+ log.Debugf("fullsync alias skip table ref %s because it has alias %s", tableName, alias)
+ continue
+ }
+ if _, ok := viewMap[tableName]; ok {
+ continue
+ }
+ log.Debugf("fullsync alias with table ref %s", tableName)
+ tableRef := &festruct.TTableRef{Table: utils.ThriftValueWrapper(tableName)}
+ tableRefs = append(tableRefs, tableRef)
+ }
+ for table, alias := range j.progress.TableAliases {
+ log.Infof("fullsync alias table from %s to %s", table, alias)
+ tableRef := &festruct.TTableRef{
+ Table: utils.ThriftValueWrapper(table),
+ AliasName: utils.ThriftValueWrapper(alias),
+ }
+ tableRefs = append(tableRefs, tableRef)
+ }
+ }
+
+ compress := false
+ if featureCompressedSnapshot {
+ if enable, err := j.IDest.IsEnableRestoreSnapshotCompression(); err != nil {
+ return xerror.Wrap(err, xerror.Normal, "check enable restore snapshot compression failed")
+ } else {
+ compress = enable
+ }
+ }
+ restoreReq := rpc.RestoreSnapshotRequest{
+ TableRefs: tableRefs,
+ SnapshotName: restoreSnapshotName,
+ SnapshotResult: snapshotResp,
+ CleanPartitions: false,
+ CleanTables: false,
+ AtomicRestore: false,
+ Compress: compress,
+ }
+ if featureCleanTableAndPartitions {
+ // drop exists partitions, and drop tables if in db sync.
+ restoreReq.CleanPartitions = true
+ if j.SyncType == DBSync {
+ restoreReq.CleanTables = true
+ }
+ }
+ if featureAtomicRestore {
+ restoreReq.AtomicRestore = true
+ }
+ restoreResp, err := destRpc.RestoreSnapshot(dest, &restoreReq)
+ if err != nil {
+ return err
+ }
+ if restoreResp.Status.GetStatusCode() != tstatus.TStatusCode_OK {
+ return xerror.Errorf(xerror.Normal, "restore snapshot failed, status: %v", restoreResp.Status)
+ }
+ log.Infof("resp: %v", restoreResp)
+
+ inMemoryData.RestoreLabel = restoreSnapshotName
+ j.progress.NextSubVolatile(WaitRestoreDone, inMemoryData)
+ return nil
+
+ case WaitRestoreDone:
+ // Step 6: Wait restore job done
+ inMemoryData := j.progress.InMemoryData.(*inMemoryData)
+ restoreSnapshotName := inMemoryData.RestoreLabel
+ tableNameMapping := inMemoryData.TableNameMapping
+ snapshotResp := inMemoryData.SnapshotResp
+
+ if snapshotResp.GetExpiredAt() > 0 && time.Now().UnixMilli() > snapshotResp.GetExpiredAt() {
+ log.Infof("fullsync snapshot %s is expired, cancel and retry with new full sync", restoreSnapshotName)
+ if err := j.IDest.CancelRestoreIfExists(restoreSnapshotName); err != nil {
+ return err
+ }
+ return j.newSnapshot(j.progress.CommitSeq)
+ }
+
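+ // Poll the restore job. If it failed because a table/view signature does not
+ // match, either restore again using a table alias (featureReplaceNotMatchedWithAlias)
+ // or drop the conflicting table/view before restoring the snapshot again.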
+ for {
+ restoreFinished, err := j.IDest.CheckRestoreFinished(restoreSnapshotName)
+ if err != nil && errors.Is(err, base.ErrRestoreSignatureNotMatched) {
+ // We need to rebuild the existing table.
+ var tableName string
+ var tableOrView bool = true
+ if j.SyncType == TableSync {
+ tableName = j.Dest.Table
+ } else {
+ tableName, tableOrView, err = j.IDest.GetRestoreSignatureNotMatchedTableOrView(restoreSnapshotName)
+ if err != nil || len(tableName) == 0 {
+ continue
+ }
+ }
+
+ resource := "table"
+ if !tableOrView {
+ resource = "view"
+ }
+ log.Infof("the signature of %s %s is not matched with the target table in snapshot", resource, tableName)
+ if tableOrView && featureReplaceNotMatchedWithAlias {
+ if j.progress.TableAliases == nil {
+ j.progress.TableAliases = make(map[string]string)
+ }
+ j.progress.TableAliases[tableName] = TableAlias(tableName)
+ j.progress.NextSubVolatile(RestoreSnapshot, inMemoryData)
+ break
+ }
+ for {
+ if tableOrView {
+ if err := j.IDest.DropTable(tableName, false); err == nil {
+ break
+ }
+ } else {
+ if err := j.IDest.DropView(tableName); err == nil {
+ break
+ }
+ }
+ }
+ log.Infof("the restore is cancelled, the unmatched %s %s is dropped, restore snapshot again", resource, tableName)
+ break
+ } else if err != nil {
+ j.progress.NextSubVolatile(RestoreSnapshot, inMemoryData)
+ return err
+ }
+
+ if !restoreFinished {
+ log.Infof("fullsync status: restore job %s is running", restoreSnapshotName)
+ return nil
+ }
+
+ tableCommitSeqMap := inMemoryData.TableCommitSeqMap
+ var commitSeq int64 = math.MaxInt64
+ switch j.SyncType {
+ case DBSync:
+ for tableId, seq := range tableCommitSeqMap {
+ if seq == 0 {
+ // Skip the views
+ continue
+ }
+ commitSeq = utils.Min(commitSeq, seq)
+ log.Debugf("fullsync table commit seq, table id: %d, commit seq: %d", tableId, seq)
+ }
+ if snapshotResp.GetCommitSeq() > 0 {
+ commitSeq = utils.Min(commitSeq, snapshotResp.GetCommitSeq())
+ }
+ j.progress.TableCommitSeqMap = tableCommitSeqMap // persist in CommitNext
+ j.progress.TableNameMapping = tableNameMapping
+ case TableSync:
+ commitSeq = tableCommitSeqMap[j.Src.TableId]
+ }
+
+ j.progress.CommitNextSubWithPersist(commitSeq, PersistRestoreInfo, restoreSnapshotName)
+ break
+ }
+
+ case PersistRestoreInfo:
+ // Step 7: Update job progress && dest table id
+ // update job info, only for dest table id
+
+ if len(j.progress.TableAliases) > 0 {
+ log.Infof("fullsync swap %d tables with aliases", len(j.progress.TableAliases))
+
+ var tables []string
+ for table := range j.progress.TableAliases {
+ tables = append(tables, table)
+ }
+ for _, table := range tables {
+ alias := j.progress.TableAliases[table]
+ targetName := table
+ if j.isTableSyncWithAlias() {
+ targetName = j.Dest.Table
+ }
+
+ // check table exists to ensure the idempotent
+ if exist, err := j.IDest.CheckTableExistsByName(alias); err != nil {
+ return err
+ } else if exist {
+ log.Infof("fullsync swap table with alias, table: %s, alias: %s", targetName, alias)
+ swap := false // drop the old table
+ if err := j.IDest.ReplaceTable(alias, targetName, swap); err != nil {
+ return err
+ }
+ } else {
+ log.Infof("fullsync the table alias has been swapped, table: %s, alias: %s", targetName, alias)
+ }
+ }
+ // Since the meta of dest table has been changed, refresh it.
+ j.destMeta.ClearTablesCache()
+
+ // Save the replace result
+ j.progress.TableAliases = nil
+ j.progress.NextSubCheckpoint(PersistRestoreInfo, j.progress.PersistData)
+ }
+
+ log.Infof("fullsync status: persist restore info")
+
+ switch j.SyncType {
+ case DBSync:
+ // refresh dest meta cache before building table mapping.
+ j.destMeta.ClearTablesCache()
+ tableMapping := make(map[int64]int64)
+ for srcTableId := range j.progress.TableCommitSeqMap {
+ var srcTableName string
+ if name, ok := j.progress.TableNameMapping[srcTableId]; ok {
+ srcTableName = name
+ } else {
+ // Kept for compatibility; but once the upstream table has been renamed,
+ // the downstream table id will not be found here.
+ name, err := j.srcMeta.GetTableNameById(srcTableId)
+ if err != nil {
+ return err
+ }
+ srcTableName = name
+
+ // If srcTableName is empty, the table may have been deleted.
+ // No need to map it to a dest table.
+ if srcTableName == "" {
+ log.Warnf("the name of source table id: %d is empty, no need to map it to dest table", srcTableId)
+ continue
+ }
+ }
+
+ destTableId, err := j.destMeta.GetTableId(srcTableName)
+ if err != nil {
+ return err
+ }
+
+ log.Debugf("fullsync table mapping, src: %d, dest: %d, name: %s",
+ srcTableId, destTableId, srcTableName)
+ tableMapping[srcTableId] = destTableId
+ }
+
+ j.progress.TableMapping = tableMapping
+ j.progress.ShadowIndexes = nil
+ j.progress.NextWithPersist(j.progress.CommitSeq, DBTablesIncrementalSync, Done, "")
+ case TableSync:
+ if destTable, err := j.destMeta.UpdateTable(j.Dest.Table, 0); err != nil {
+ return err
+ } else {
+ j.Dest.TableId = destTable.Id
+ }
+
+ if err := j.persistJob(); err != nil {
+ return err
+ }
+
+ j.progress.TableCommitSeqMap = nil
+ j.progress.TableMapping = nil
+ j.progress.ShadowIndexes = nil
+ j.progress.NextWithPersist(j.progress.CommitSeq, TableIncrementalSync, Done, "")
+ default:
+ return xerror.Errorf(xerror.Normal, "invalid sync type %d", j.SyncType)
+ }
+
+ return nil
+ default:
+ return xerror.Errorf(xerror.Normal, "invalid job sub sync state %d", j.progress.SubSyncState)
+ }
+
+ return j.fullSync()
+}
+
+func (j *Job) persistJob() error {
+ data, err := json.Marshal(j)
+ if err != nil {
+ return xerror.Errorf(xerror.Normal, "marshal job failed, job: %v", j)
+ }
+
+ if err := j.db.UpdateJob(j.Name, string(data)); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (j *Job) newLabel(commitSeq int64) string {
+ src := &j.Src
+ dest := &j.Dest
+ randNum := rand.Intn(65536) // hex 4 chars
+ if j.SyncType == DBSync {
+ // label "ccrj-rand:${sync_type}:${src_db_id}:${dest_db_id}:${commit_seq}"
+ return fmt.Sprintf("ccrj-%x:%s:%d:%d:%d", randNum, j.SyncType, src.DbId, dest.DbId, commitSeq)
+ } else {
+ // TableSync
+ // label "ccrj-rand:${sync_type}:${src_db_id}_${src_table_id}:${dest_db_id}_${dest_table_id}:${commit_seq}"
+ return fmt.Sprintf("ccrj-%x:%s:%d_%d:%d_%d:%d", randNum, j.SyncType, src.DbId, src.TableId, dest.DbId, dest.TableId, commitSeq)
+ }
+}
+
+// only called by DBSync, TableSync tableId is in Src/Dest Spec
+func (j *Job) getDestTableIdBySrc(srcTableId int64) (int64, error) {
+ if j.progress.TableMapping != nil {
+ if destTableId, ok := j.progress.TableMapping[srcTableId]; ok {
+ return destTableId, nil
+ }
+ log.Warnf("table mapping not found, src table id: %d", srcTableId)
+ } else {
+ log.Warnf("table mapping not found, src table id: %d", srcTableId)
+ j.progress.TableMapping = make(map[int64]int64)
+ }
+
+ // WARNING: the table name might have changed while TableMapping is kept up to date elsewhere;
+ // this lookup by name is only kept for compatibility.
+ srcTableName, err := j.srcMeta.GetTableNameById(srcTableId)
+ if err != nil {
+ return 0, err
+ }
+
+ if destTableId, err := j.destMeta.GetTableId(srcTableName); err != nil {
+ return 0, err
+ } else {
+ j.progress.TableMapping[srcTableId] = destTableId
+ return destTableId, nil
+ }
+}
+
+func (j *Job) getDestNameBySrcId(srcTableId int64) (string, error) {
+ destTableId, err := j.getDestTableIdBySrc(srcTableId)
+ if err != nil {
+ return "", err
+ }
+
+ name, err := j.destMeta.GetTableNameById(destTableId)
+ if err != nil {
+ return "", err
+ }
+
+ if name == "" {
+ return "", xerror.Errorf(xerror.Normal, "dest table name not found, dest table id: %d", destTableId)
+ }
+
+ return name, nil
+}
+
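+// isBinlogCommitted reports whether this binlog has already been covered by the
+// table's restored commit seq (only possible during DBTablesIncrementalSync).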
+func (j *Job) isBinlogCommitted(tableId int64, binlogCommitSeq int64) bool {
+ if j.progress.SyncState == DBTablesIncrementalSync {
+ tableCommitSeq, ok := j.progress.TableCommitSeqMap[tableId]
+ if ok && binlogCommitSeq <= tableCommitSeq {
+ log.Infof("filter the already committed binlog %d, table commit seq: %d, table: %d",
+ binlogCommitSeq, tableCommitSeq, tableId)
+ return true
+ }
+ }
+ return false
+}
+
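+// getDbSyncTableRecords picks the table records whose commit seq is newer than the
+// per-table commit seq recorded by full sync; tables without an entry (db partial
+// sync) are always included.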
+func (j *Job) getDbSyncTableRecords(upsert *record.Upsert) []*record.TableRecord {
+ commitSeq := upsert.CommitSeq
+ tableCommitSeqMap := j.progress.TableCommitSeqMap
+ tableRecords := make([]*record.TableRecord, 0, len(upsert.TableRecords))
+
+ for tableId, tableRecord := range upsert.TableRecords {
+ // DBIncrementalSync
+ if tableCommitSeqMap == nil {
+ tableRecords = append(tableRecords, tableRecord)
+ continue
+ }
+
+ if tableCommitSeq, ok := tableCommitSeqMap[tableId]; ok {
+ if commitSeq > tableCommitSeq {
+ tableRecords = append(tableRecords, tableRecord)
+ }
+ } else {
+ // for db partial sync
+ tableRecords = append(tableRecords, tableRecord)
+ }
+ }
+
+ return tableRecords
+}
+
+func (j *Job) getRelatedTableRecords(upsert *record.Upsert) ([]*record.TableRecord, error) {
+ var tableRecords []*record.TableRecord
+
+ switch j.SyncType {
+ case DBSync:
+ records := j.getDbSyncTableRecords(upsert)
+ if len(records) == 0 {
+ return nil, nil
+ }
+ tableRecords = records
+ case TableSync:
+ tableRecord, ok := upsert.TableRecords[j.Src.TableId]
+ if !ok {
+ return nil, xerror.Errorf(xerror.Normal, "table record not found, table: %s", j.Src.Table)
+ }
+
tableRecords = make([]*record.TableRecord, 0, 1)
tableRecords = append(tableRecords, tableRecord)
default:
- return nil, xerror.Errorf(xerror.Normal, "invalid sync type: %s", j.SyncType)
+ return nil, xerror.Errorf(xerror.Normal, "invalid sync type: %s", j.SyncType)
+ }
+
+ return tableRecords, nil
+}
+
+// Table ingestBinlog
+func (j *Job) ingestBinlog(txnId int64, tableRecords []*record.TableRecord) ([]*ttypes.TTabletCommitInfo, error) {
+ log.Infof("ingestBinlog, txnId: %d", txnId)
+
+ job, err := j.jobFactory.CreateJob(NewIngestContext(txnId, tableRecords, j.progress.TableMapping), j, "IngestBinlog")
+ if err != nil {
+ return nil, err
+ }
+
+ ingestBinlogJob, ok := job.(*IngestBinlogJob)
+ if !ok {
+ return nil, xerror.Errorf(xerror.Normal, "invalid job type, job: %+v", job)
+ }
+
+ job.Run()
+ if err := job.Error(); err != nil {
+ return nil, err
+ }
+ return ingestBinlogJob.CommitInfos(), nil
+}
+
+// Table ingestBinlog for txn insert
+func (j *Job) ingestBinlogForTxnInsert(txnId int64, tableRecords []*record.TableRecord, stidMap map[int64]int64, destTableId int64) ([]*festruct.TSubTxnInfo, error) {
+ log.Infof("ingestBinlogForTxnInsert, txnId: %d", txnId)
+
+ job, err := j.jobFactory.CreateJob(NewIngestContextForTxnInsert(txnId, tableRecords, j.progress.TableMapping, stidMap), j, "IngestBinlog")
+ if err != nil {
+ return nil, err
+ }
+
+ ingestBinlogJob, ok := job.(*IngestBinlogJob)
+ if !ok {
+ return nil, xerror.Errorf(xerror.Normal, "invalid job type, job: %+v", job)
+ }
+
+ job.Run()
+ if err := job.Error(); err != nil {
+ return nil, err
+ }
+
+ stidToCommitInfos := ingestBinlogJob.SubTxnToCommitInfos()
+ subTxnInfos := make([]*festruct.TSubTxnInfo, 0, len(stidMap))
+ for sourceStid, destStid := range stidMap {
+ destStid := destStid // copy the loop variable; otherwise every element in subTxnInfos would point to the last destStid
+ commitInfos := stidToCommitInfos[destStid]
+ if commitInfos == nil {
+ log.Warnf("no commit infos from source stid: %d; dest stid %d, just skip", sourceStid, destStid)
+ continue
+ }
+
+ tSubTxnInfo := &festruct.TSubTxnInfo{
+ SubTxnId: &destStid,
+ TableId: &destTableId,
+ TabletCommitInfos: commitInfos,
+ }
+
+ subTxnInfos = append(subTxnInfos, tSubTxnInfo)
+ }
+
+ return subTxnInfos, nil
+}
+
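+// handleUpsertWithRetry retries the upsert once if it fails with a metadata-category error.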
+func (j *Job) handleUpsertWithRetry(binlog *festruct.TBinlog) error {
+ err := j.handleUpsert(binlog)
+ if !xerror.IsCategory(err, xerror.Meta) {
+ return err
+ }
+
+ log.Warnf("a meta error occurred, retry to handle upsert binlog again, commitSeq: %d", binlog.GetCommitSeq())
+ return j.handleUpsert(binlog)
+}
+
+func (j *Job) handleUpsert(binlog *festruct.TBinlog) error {
+ log.Infof("handle upsert binlog, sub sync state: %s, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.SubSyncState, j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+ // inMemoryData is updated by the state machine and progress keeps a reference to it, so progress.InMemoryData is always the latest; calls to NextSubCheckpoint therefore don't need to update the in-memory data in progress.
+ type inMemoryData struct {
+ CommitSeq int64 `json:"commit_seq"`
+ TxnId int64 `json:"txn_id"`
+ DestTableIds []int64 `json:"dest_table_ids"`
+ TableRecords []*record.TableRecord `json:"table_records"`
+ CommitInfos []*ttypes.TTabletCommitInfo `json:"commit_infos"`
+ IsTxnInsert bool `json:"is_txn_insert"`
+ SourceStids []int64 `json:"source_stid"`
+ DestStids []int64 `json:"desc_stid"`
+ SubTxnInfos []*festruct.TSubTxnInfo `json:"sub_txn_infos"`
+ }
+
+ updateInMemory := func() error {
+ if j.progress.InMemoryData == nil {
+ persistData := j.progress.PersistData
+ inMemoryData := &inMemoryData{}
+ if err := json.Unmarshal([]byte(persistData), inMemoryData); err != nil {
+ return xerror.Errorf(xerror.Normal, "unmarshal persistData failed, persistData: %s", persistData)
+ }
+ j.progress.InMemoryData = inMemoryData
+ }
+ return nil
+ }
+
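+	// rollback records the failure and moves the sub sync state to RollbackTransaction;
+	// the transaction itself is aborted when that state is handled below.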
+ rollback := func(err error, inMemoryData *inMemoryData) {
+ log.Errorf("need rollback, err: %+v", err)
+ j.progress.NextSubCheckpoint(RollbackTransaction, inMemoryData)
+ }
+
+ committed := func() {
+ log.Infof("txn committed, commitSeq: %d, cleanup", j.progress.CommitSeq)
+
+ inMemoryData := j.progress.InMemoryData.(*inMemoryData)
+ commitSeq := j.progress.CommitSeq
+ destTableIds := inMemoryData.DestTableIds
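+		// For DB sync, advance the per-table commit seqs so that handleBinlogs can tell
+		// when every table has caught up with the database-level binlog stream.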
+ if j.SyncType == DBSync && len(j.progress.TableCommitSeqMap) > 0 {
+ for _, tableId := range destTableIds {
+ tableCommitSeq, ok := j.progress.TableCommitSeqMap[tableId]
+ if !ok {
+ continue
+ }
+
+ if tableCommitSeq < commitSeq {
+ j.progress.TableCommitSeqMap[tableId] = commitSeq
+ }
+ }
+
+ j.progress.Persist()
+ }
+ j.progress.Done()
+ }
+
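+	// The upsert is handled as a small state machine: Done (parse the binlog and collect
+	// table records) -> BeginTransaction -> IngestBinlog -> CommitTransaction, with
+	// RollbackTransaction as the failure path. The tail call at the end of this function
+	// drives the binlog through the next sub state.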
+ dest := &j.Dest
+ switch j.progress.SubSyncState {
+ case Done:
+ if binlog == nil {
+ log.Errorf("binlog is nil, %+v", xerror.Errorf(xerror.Normal, "handle nil upsert binlog"))
+ return nil
+ }
+
+ data := binlog.GetData()
+ upsert, err := record.NewUpsertFromJson(data)
+ if err != nil {
+ return err
+ }
+ log.Debugf("upsert: %v", upsert)
+
+ // Step 1: get related tableRecords
+		isTxnInsert := false
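+		// A non-empty Stids list marks a txn insert (a load made of multiple sub
+		// transactions); it is gated by featureTxnInsert and only supported for table sync.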
+ if len(upsert.Stids) > 0 {
+ if !featureTxnInsert {
+ log.Warnf("The txn insert is not supported yet")
+ return xerror.Errorf(xerror.Normal, "The txn insert is not supported yet")
+ }
+ if j.SyncType == DBSync {
+ log.Warnf("Txn insert is NOT supported when DBSync")
+ return xerror.Errorf(xerror.Normal, "Txn insert is NOT supported when DBSync")
+ }
+ isTxnInsert = true
+ }
+
+ tableRecords, err := j.getRelatedTableRecords(upsert)
+ if err != nil {
+ log.Errorf("get related table records failed, err: %+v", err)
+ }
+ if len(tableRecords) == 0 {
+ log.Debug("no related table records")
+ return nil
+ }
+
+ log.Debugf("tableRecords: %v", tableRecords)
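+		// For DB sync, resolve the dest table id of every source table record; table sync
+		// always writes into the single configured dest table.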
+ destTableIds := make([]int64, 0, len(tableRecords))
+ if j.SyncType == DBSync {
+ for _, tableRecord := range tableRecords {
+ if destTableId, err := j.getDestTableIdBySrc(tableRecord.Id); err != nil {
+ return err
+ } else {
+ destTableIds = append(destTableIds, destTableId)
+ }
+ }
+ } else {
+ destTableIds = append(destTableIds, j.Dest.TableId)
+ }
+ inMemoryData := &inMemoryData{
+ CommitSeq: upsert.CommitSeq,
+ DestTableIds: destTableIds,
+ TableRecords: tableRecords,
+ IsTxnInsert: isTxnInsert,
+ SourceStids: upsert.Stids,
+ }
+ j.progress.NextSubVolatile(BeginTransaction, inMemoryData)
+
+ case BeginTransaction:
+ // Step 2: begin txn
+ inMemoryData := j.progress.InMemoryData.(*inMemoryData)
+ commitSeq := inMemoryData.CommitSeq
+ sourceStids := inMemoryData.SourceStids
+ isTxnInsert := inMemoryData.IsTxnInsert
+ log.Debugf("begin txn, dest: %v, commitSeq: %d", dest, commitSeq)
+
+ destRpc, err := j.factory.NewFeRpc(dest)
+ if err != nil {
+ return err
+ }
+
+ label := j.newLabel(commitSeq)
+
+ var beginTxnResp *festruct.TBeginTxnResult_
+ if isTxnInsert {
+			// For a txn insert, pass the number of sub transactions to BeginTransaction;
+			// it returns one sub txn id (stid) per sub transaction.
+ beginTxnResp, err = destRpc.BeginTransactionForTxnInsert(dest, label, inMemoryData.DestTableIds, int64(len(sourceStids)))
+ } else {
+ beginTxnResp, err = destRpc.BeginTransaction(dest, label, inMemoryData.DestTableIds)
+ }
+
+ if err != nil {
+ return err
+ }
+ log.Debugf("resp: %v", beginTxnResp)
+ if beginTxnResp.GetStatus().GetStatusCode() != tstatus.TStatusCode_OK {
+ if isTableNotFound(beginTxnResp.GetStatus()) && j.SyncType == DBSync {
+				// It might be caused by stale TableMapping entries.
+				// Roll back the progress so that the dest table ids can be rebuilt.
+ j.progress.Rollback(j.SkipError)
+ for _, tableRecord := range inMemoryData.TableRecords {
+ delete(j.progress.TableMapping, tableRecord.Id)
+ }
+ }
+ return xerror.Errorf(xerror.Normal, "begin txn failed, status: %v", beginTxnResp.GetStatus())
+ }
+ txnId := beginTxnResp.GetTxnId()
+ if isTxnInsert {
+ destStids := beginTxnResp.GetSubTxnIds()
+ inMemoryData.DestStids = destStids
+ log.Debugf("TxnId: %d, DbId: %d, destStids: %v", txnId, beginTxnResp.GetDbId(), destStids)
+ } else {
+ log.Debugf("TxnId: %d, DbId: %d", txnId, beginTxnResp.GetDbId())
+ }
+
+ inMemoryData.TxnId = txnId
+ j.progress.NextSubCheckpoint(IngestBinlog, inMemoryData)
+
+ case IngestBinlog:
+ log.Debug("ingest binlog")
+ if err := updateInMemory(); err != nil {
+ return err
+ }
+ inMemoryData := j.progress.InMemoryData.(*inMemoryData)
+ tableRecords := inMemoryData.TableRecords
+ txnId := inMemoryData.TxnId
+ isTxnInsert := inMemoryData.IsTxnInsert
+
+ // make stidMap, source_stid to dest_stid
+ stidMap := make(map[int64]int64)
+ if isTxnInsert {
+ sourceStids := inMemoryData.SourceStids
+ destStids := inMemoryData.DestStids
+ if len(sourceStids) == len(destStids) {
+ for i := 0; i < len(sourceStids); i++ {
+ stidMap[sourceStids[i]] = destStids[i]
+ }
+ }
+ }
+
+ // Step 3: ingest binlog
+ if isTxnInsert {
+ // When txn insert, only one table can be inserted, so use the first DestTableId
+ destTableId := inMemoryData.DestTableIds[0]
+
+ // When txn insert, use subTxnInfos to commit rather than commitInfos.
+ subTxnInfos, err := j.ingestBinlogForTxnInsert(txnId, tableRecords, stidMap, destTableId)
+ if err != nil {
+ rollback(err, inMemoryData)
+ return err
+ } else {
+ inMemoryData.SubTxnInfos = subTxnInfos
+ j.progress.NextSubCheckpoint(CommitTransaction, inMemoryData)
+ }
+ } else {
+ commitInfos, err := j.ingestBinlog(txnId, tableRecords)
+ if err != nil {
+ rollback(err, inMemoryData)
+ return err
+ } else {
+ inMemoryData.CommitInfos = commitInfos
+ j.progress.NextSubCheckpoint(CommitTransaction, inMemoryData)
+ }
+ }
+
+ case CommitTransaction:
+ // Step 4: commit txn
+ log.Debug("commit txn")
+ if err := updateInMemory(); err != nil {
+ return err
+ }
+ inMemoryData := j.progress.InMemoryData.(*inMemoryData)
+ txnId := inMemoryData.TxnId
+ commitInfos := inMemoryData.CommitInfos
+
+ destRpc, err := j.factory.NewFeRpc(dest)
+ if err != nil {
+ rollback(err, inMemoryData)
+ break
+ }
+
+ isTxnInsert := inMemoryData.IsTxnInsert
+ subTxnInfos := inMemoryData.SubTxnInfos
+ var resp *festruct.TCommitTxnResult_
+ if isTxnInsert {
+ resp, err = destRpc.CommitTransactionForTxnInsert(dest, txnId, true, subTxnInfos)
+ } else {
+ resp, err = destRpc.CommitTransaction(dest, txnId, commitInfos)
+ }
+ if err != nil {
+ rollback(err, inMemoryData)
+ break
+ }
+
+ if statusCode := resp.Status.GetStatusCode(); statusCode == tstatus.TStatusCode_PUBLISH_TIMEOUT {
+ dest.WaitTransactionDone(txnId)
+ } else if statusCode != tstatus.TStatusCode_OK {
+ err := xerror.Errorf(xerror.Normal, "commit txn failed, status: %v", resp.Status)
+ rollback(err, inMemoryData)
+ break
+ }
+
+ log.Infof("TxnId: %d committed, resp: %v", txnId, resp)
+ committed()
+
+ return nil
+
+ case RollbackTransaction:
+ log.Debugf("Rollback txn")
+		// Not a numbered step: just roll back the txn.
+ if err := updateInMemory(); err != nil {
+ return err
+ }
+
+ inMemoryData := j.progress.InMemoryData.(*inMemoryData)
+ txnId := inMemoryData.TxnId
+ destRpc, err := j.factory.NewFeRpc(dest)
+ if err != nil {
+ return err
+ }
+
+ resp, err := destRpc.RollbackTransaction(dest, txnId)
+ if err != nil {
+ return err
+ }
+ if resp.Status.GetStatusCode() != tstatus.TStatusCode_OK {
+ if isTxnNotFound(resp.Status) {
+ log.Warnf("txn not found, txnId: %d", txnId)
+ } else if isTxnAborted(resp.Status) {
+ log.Infof("txn already aborted, txnId: %d", txnId)
+ } else if isTxnCommitted(resp.Status) {
+ log.Infof("txn already committed, txnId: %d", txnId)
+ committed()
+ return nil
+ } else {
+ return xerror.Errorf(xerror.Normal, "rollback txn failed, status: %v", resp.Status)
+ }
+ }
+
+ log.Infof("rollback TxnId: %d resp: %v", txnId, resp)
+ j.progress.Rollback(j.SkipError)
+ return nil
+
+ default:
+ return xerror.Errorf(xerror.Normal, "invalid job sub sync state %d", j.progress.SubSyncState)
+ }
+
+ return j.handleUpsert(binlog)
+}
+
+// handleAddPartition
+func (j *Job) handleAddPartition(binlog *festruct.TBinlog) error {
+ log.Infof("handle add partition binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+ data := binlog.GetData()
+ addPartition, err := record.NewAddPartitionFromJson(data)
+ if err != nil {
+ return err
+ }
+
+ if j.isBinlogCommitted(addPartition.TableId, binlog.GetCommitSeq()) {
+ return nil
+ }
+
+ if addPartition.IsTemp {
+		log.Infof("skip adding temporary partition because backing up/restoring tables with temporary partitions is not supported yet")
+ return nil
+ }
+
+ var destTableName string
+ if j.SyncType == TableSync {
+ destTableName = j.Dest.Table
+ } else if j.SyncType == DBSync {
+ destTableId, err := j.getDestTableIdBySrc(addPartition.TableId)
+ if err != nil {
+ return err
+ }
+
+ if destTableName, err = j.destMeta.GetTableNameById(destTableId); err != nil {
+ return err
+ } else if destTableName == "" {
+ return xerror.Errorf(xerror.Normal, "tableId %d not found in destMeta", destTableId)
+ }
+ }
+ return j.IDest.AddPartition(destTableName, addPartition)
+}
+
+// handleDropPartition
+func (j *Job) handleDropPartition(binlog *festruct.TBinlog) error {
+ log.Infof("handle drop partition binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+ data := binlog.GetData()
+ dropPartition, err := record.NewDropPartitionFromJson(data)
+ if err != nil {
+ return err
+ }
+
+ if dropPartition.IsTemp {
+ log.Infof("Since the temporary partition is not synchronized to the downstream, this binlog is skipped.")
+ return nil
+ }
+
+ if j.isBinlogCommitted(dropPartition.TableId, binlog.GetCommitSeq()) {
+ return nil
+ }
+
+ var destTableName string
+ if j.SyncType == TableSync {
+ destTableName = j.Dest.Table
+ } else if j.SyncType == DBSync {
+ destTableId, err := j.getDestTableIdBySrc(dropPartition.TableId)
+ if err != nil {
+ return err
+ }
+
+ if destTableName, err = j.destMeta.GetTableNameById(destTableId); err != nil {
+ return err
+ } else if destTableName == "" {
+ return xerror.Errorf(xerror.Normal, "tableId %d not found in destMeta", destTableId)
+ }
+ }
+ return j.IDest.DropPartition(destTableName, dropPartition)
+}
+
+// handleCreateTable
+func (j *Job) handleCreateTable(binlog *festruct.TBinlog) error {
+ log.Infof("handle create table binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+ if j.SyncType != DBSync {
+ return xerror.Errorf(xerror.Normal, "invalid sync type: %v", j.SyncType)
+ }
+
+ data := binlog.GetData()
+ createTable, err := record.NewCreateTableFromJson(data)
+ if err != nil {
+ return err
+ }
+
+ if j.isBinlogCommitted(createTable.TableId, binlog.GetCommitSeq()) {
+ return nil
+ }
+
+ if featureCreateViewDropExists {
+ viewRegex := regexp.MustCompile(`(?i)^CREATE(\s+)VIEW`)
+ isCreateView := viewRegex.MatchString(createTable.Sql)
+ tableName := strings.TrimSpace(createTable.TableName)
+ if isCreateView && len(tableName) > 0 {
+ // drop view if exists
+ log.Infof("feature_create_view_drop_exists is enabled, try drop view %s before creating", tableName)
+ if err = j.IDest.DropView(tableName); err != nil {
+ return xerror.Wrapf(err, xerror.Normal, "drop view before create view %s, table id=%d",
+ tableName, createTable.TableId)
+ }
+ }
+ }
+
+	// Some operations, such as DROP TABLE, are skipped during the partial/full snapshot,
+	// so the dest table might already exist; check it before creating.
+	// If the dest table already exists, fall back to a partial snapshot.
+ //
+ // See test_cds_fullsync_tbl_drop_create.groovy for details
+ if j.SyncType == DBSync && !createTable.IsCreateView() {
+ if exists, err := j.IDest.CheckTableExistsByName(createTable.TableName); err != nil {
+ return err
+ } else if exists {
+ log.Warnf("the dest table %s already exists, force partial snapshot, commit seq: %d",
+ createTable.TableName, binlog.GetCommitSeq())
+ replace := true
+ return j.newPartialSnapshot(createTable.TableId, createTable.TableName, nil, replace)
+ }
+ }
+
+ if err = j.IDest.CreateTableOrView(createTable, j.Src.Database); err != nil {
+ return xerror.Wrapf(err, xerror.Normal, "create table %d", createTable.TableId)
+ }
+
+ j.srcMeta.ClearTablesCache()
+ j.destMeta.ClearTablesCache()
+
+ srcTableName := createTable.TableName
+ if len(srcTableName) == 0 {
+		// The field `TableName` was added after doris 2.0.3. To keep compatibility, try to read the src table
+		// name from the upstream, but the result might be wrong if the upstream has executed a rename/replace.
+ log.Infof("the table id %d is not found in the binlog record, get the name from the upstream", createTable.TableId)
+ srcTableName, err = j.srcMeta.GetTableNameById(createTable.TableId)
+ if err != nil {
+ return xerror.Errorf(xerror.Normal, "the table with id %d is not found in the upstream cluster, create table: %s",
+ createTable.TableId, createTable.String())
+ }
+ }
+
+ var destTableId int64
+ destTableId, err = j.destMeta.GetTableId(srcTableName)
+ if err != nil {
+ return err
+ }
+
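+	// Record the src -> dest table id and the src table name in the progress mappings so
+	// that later binlogs of this table can be resolved against the dest cluster.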
+ if j.progress.TableMapping == nil {
+ j.progress.TableMapping = make(map[int64]int64)
+ }
+ j.progress.TableMapping[createTable.TableId] = destTableId
+ if j.progress.TableNameMapping == nil {
+ j.progress.TableNameMapping = make(map[int64]string)
+ }
+ j.progress.TableNameMapping[createTable.TableId] = srcTableName
+ j.progress.Done()
+ return nil
+}
+
+// handleDropTable
+func (j *Job) handleDropTable(binlog *festruct.TBinlog) error {
+ log.Infof("handle drop table binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+ if j.SyncType != DBSync {
+ return xerror.Errorf(xerror.Normal, "invalid sync type: %v", j.SyncType)
+ }
+
+ data := binlog.GetData()
+ dropTable, err := record.NewDropTableFromJson(data)
+ if err != nil {
+ return err
+ }
+
+ if !dropTable.IsView {
+ if _, ok := j.progress.TableMapping[dropTable.TableId]; !ok {
+ log.Warnf("the dest table is not found, skip drop table binlog, src table id: %d, commit seq: %d",
+ dropTable.TableId, binlog.GetCommitSeq())
+ return nil
+ }
+ }
+
+ if j.isBinlogCommitted(dropTable.TableId, binlog.GetCommitSeq()) {
+ return nil
+ }
+
+ tableName := dropTable.TableName
+	// Compatibility fallback: the field `TableName` was only added after doris 2.0.0.
+ if tableName == "" {
+ dirtySrcTables := j.srcMeta.DirtyGetTables()
+ srcTable, ok := dirtySrcTables[dropTable.TableId]
+ if !ok {
+ return xerror.Errorf(xerror.Normal, "table not found, tableId: %d", dropTable.TableId)
+ }
+
+ tableName = srcTable.Name
+ }
+
+ if dropTable.IsView {
+ if err = j.IDest.DropView(tableName); err != nil {
+ return xerror.Wrapf(err, xerror.Normal, "drop view %s", tableName)
+ }
+ } else {
+ if err = j.IDest.DropTable(tableName, true); err != nil {
+ // In apache/doris/common/ErrorCode.java
+ //
+ // ERR_WRONG_OBJECT(1347, new byte[]{'H', 'Y', '0', '0', '0'}, "'%s.%s' is not %s. %s.")
+ if !strings.Contains(err.Error(), "is not TABLE") {
+ return xerror.Wrapf(err, xerror.Normal, "drop table %s", tableName)
+ } else if err = j.IDest.DropView(tableName); err != nil { // retry with drop view.
+ return xerror.Wrapf(err, xerror.Normal, "drop view %s", tableName)
+ }
+ }
+ }
+
+ j.srcMeta.ClearTablesCache()
+ j.destMeta.ClearTablesCache()
+ delete(j.progress.TableNameMapping, dropTable.TableId)
+ delete(j.progress.TableMapping, dropTable.TableId)
+ return nil
+}
+
+func (j *Job) handleDummy(binlog *festruct.TBinlog) error {
+ dummyCommitSeq := binlog.GetCommitSeq()
+
+ log.Infof("handle dummy binlog, need full sync. SyncType: %v, seq: %v", j.SyncType, dummyCommitSeq)
+
+ return j.newSnapshot(dummyCommitSeq)
+}
+
+// handleAlterJob
+func (j *Job) handleAlterJob(binlog *festruct.TBinlog) error {
+ log.Infof("handle alter job binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+ data := binlog.GetData()
+ alterJob, err := record.NewAlterJobV2FromJson(data)
+ if err != nil {
+ return err
+ }
+
+ if featureSkipRollupBinlogs && alterJob.Type == record.ALTER_JOB_ROLLUP {
+ log.Warnf("skip rollup alter job: %s", alterJob)
+ return nil
+ }
+
+ if alterJob.Type == record.ALTER_JOB_SCHEMA_CHANGE {
+ return j.handleSchemaChange(alterJob)
+ } else if alterJob.Type == record.ALTER_JOB_ROLLUP {
+ return j.handleAlterRollup(alterJob)
+ } else {
+ return xerror.Errorf(xerror.Normal, "unsupported alter job type: %s", alterJob.Type)
+ }
+}
+
+func (j *Job) handleAlterRollup(alterJob *record.AlterJobV2) error {
+ if !alterJob.IsFinished() {
+ switch alterJob.JobState {
+ case record.ALTER_JOB_STATE_PENDING:
+			// Once the rollup job steps to WAITING_TXN, upserts to the rollup index are allowed,
+			// but the dest index in the downstream cluster hasn't been created yet.
+			//
+			// To filter out upserts to the rollup index, save the shadow index ids here.
+ if j.progress.ShadowIndexes == nil {
+ j.progress.ShadowIndexes = make(map[int64]int64)
+ }
+ j.progress.ShadowIndexes[alterJob.RollupIndexId] = alterJob.BaseIndexId
+ case record.ALTER_JOB_STATE_CANCELLED:
+ // clear the shadow indexes
+ delete(j.progress.ShadowIndexes, alterJob.RollupIndexId)
+ }
+ return nil
+ }
+
+	// Once the partial snapshot finishes, the rollup indexes are converted to normal indexes.
+ delete(j.progress.ShadowIndexes, alterJob.RollupIndexId)
+
+ replace := true
+ return j.newPartialSnapshot(alterJob.TableId, alterJob.TableName, nil, replace)
+}
+
+func (j *Job) handleSchemaChange(alterJob *record.AlterJobV2) error {
+ if !alterJob.IsFinished() {
+ switch alterJob.JobState {
+ case record.ALTER_JOB_STATE_PENDING:
+			// Once the schema change steps to WAITING_TXN, upserts to the shadow indexes are allowed,
+			// but the dest indexes in the downstream cluster haven't been created yet.
+			//
+			// To filter out upserts to the shadow indexes, save the shadow index ids here.
+ if j.progress.ShadowIndexes == nil {
+ j.progress.ShadowIndexes = make(map[int64]int64)
+ }
+ for shadowIndexId, originIndexId := range alterJob.ShadowIndexes {
+ j.progress.ShadowIndexes[shadowIndexId] = originIndexId
+ }
+ case record.ALTER_JOB_STATE_CANCELLED:
+ // clear the shadow indexes
+ for shadowIndexId := range alterJob.ShadowIndexes {
+ delete(j.progress.ShadowIndexes, shadowIndexId)
+ }
+ }
+ return nil
+ }
+
+	// Determine the dest table name.
+ var destTableName string
+ if j.SyncType == TableSync {
+ destTableName = j.Dest.Table
+ } else {
+ destTableName = alterJob.TableName
+ }
+
+ if featureSchemaChangePartialSync && alterJob.Type == record.ALTER_JOB_SCHEMA_CHANGE {
+		// Once the partial snapshot finishes, the shadow indexes are converted to normal indexes.
+ for shadowIndexId := range alterJob.ShadowIndexes {
+ delete(j.progress.ShadowIndexes, shadowIndexId)
+ }
+
+ replaceTable := true
+ return j.newPartialSnapshot(alterJob.TableId, alterJob.TableName, nil, replaceTable)
+ }
+
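+	// With partial sync disabled, fall back to dropping the dependent views and the dest
+	// table (retrying until both succeed) and then trigger a full snapshot.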
+	allViewDeleted := false
+ for {
+		// Before dropping the table, drop the related views first.
+ if !allViewDeleted {
+ views, err := j.IDest.GetAllViewsFromTable(destTableName)
+ if err != nil {
+				log.Errorf("get views from table failed when handling alter job, err: %v", err)
+				continue
+			}
+
+			dropViewFailed := false
+			for _, view := range views {
+				if err := j.IDest.DropView(view); err != nil {
+					log.Errorf("drop view %s failed when handling alter job, err: %v", view, err)
+ dropViewFailed = true
+ }
+ }
+ if dropViewFailed {
+ continue
+ }
+
+ allViewDeleted = true
+ }
+
+ if err := j.IDest.DropTable(destTableName, true); err == nil {
+ break
+ }
+ }
+
+ return j.newSnapshot(j.progress.CommitSeq)
+}
+
+// handleLightningSchemaChange
+func (j *Job) handleLightningSchemaChange(binlog *festruct.TBinlog) error {
+ log.Infof("handle lightning schema change binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+ data := binlog.GetData()
+ lightningSchemaChange, err := record.NewModifyTableAddOrDropColumnsFromJson(data)
+ if err != nil {
+ return err
+ }
+
+ if j.isBinlogCommitted(lightningSchemaChange.TableId, binlog.GetCommitSeq()) {
+ return nil
+ }
+
+ tableAlias := ""
+ if j.isTableSyncWithAlias() {
+ tableAlias = j.Dest.Table
+ }
+ return j.IDest.LightningSchemaChange(j.Src.Database, tableAlias, lightningSchemaChange)
+}
+
+// handle rename column
+func (j *Job) handleRenameColumn(binlog *festruct.TBinlog) error {
+ log.Infof("handle rename column binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+ data := binlog.GetData()
+ renameColumn, err := record.NewRenameColumnFromJson(data)
+ if err != nil {
+ return err
+ }
+
+ return j.handleRenameColumnRecord(binlog.GetCommitSeq(), renameColumn)
+}
+
+func (j *Job) handleRenameColumnRecord(commitSeq int64, renameColumn *record.RenameColumn) error {
+ if j.isBinlogCommitted(renameColumn.TableId, commitSeq) {
+ return nil
+ }
+
+ var destTableName string
+ if j.SyncType == TableSync {
+ destTableName = j.Dest.Table
+ } else {
+ var err error
+ destTableName, err = j.getDestNameBySrcId(renameColumn.TableId)
+ if err != nil {
+ return err
+ }
+ }
+
+ return j.IDest.RenameColumn(destTableName, renameColumn)
+}
+
+// handle modify comment
+func (j *Job) handleModifyComment(binlog *festruct.TBinlog) error {
+ log.Infof("handle modify comment binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+ data := binlog.GetData()
+ modifyComment, err := record.NewModifyCommentFromJson(data)
+ if err != nil {
+ return err
+ }
+
+ return j.handleModifyCommentRecord(binlog.GetCommitSeq(), modifyComment)
+}
+
+func (j *Job) handleModifyCommentRecord(commitSeq int64, modifyComment *record.ModifyComment) error {
+ if j.isBinlogCommitted(modifyComment.TblId, commitSeq) {
+ return nil
+ }
+
+ var destTableName string
+ if j.SyncType == TableSync {
+ destTableName = j.Dest.Table
+ } else {
+ var err error
+ destTableName, err = j.getDestNameBySrcId(modifyComment.TblId)
+ if err != nil {
+ return err
+ }
+ }
+
+ return j.IDest.ModifyComment(destTableName, modifyComment)
+}
+
+func (j *Job) handleTruncateTable(binlog *festruct.TBinlog) error {
+ log.Infof("handle truncate table binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+ data := binlog.GetData()
+ truncateTable, err := record.NewTruncateTableFromJson(data)
+ if err != nil {
+ return err
+ }
+
+ if j.isBinlogCommitted(truncateTable.TableId, binlog.GetCommitSeq()) {
+ return nil
+ }
+
+ var destTableName string
+ switch j.SyncType {
+ case DBSync:
+ destTableName = truncateTable.TableName
+ case TableSync:
+ destTableName = j.Dest.Table
+ default:
+ return xerror.Panicf(xerror.Normal, "invalid sync type: %v", j.SyncType)
+ }
+
+ err = j.IDest.TruncateTable(destTableName, truncateTable)
+ if err == nil {
+ j.srcMeta.ClearTable(j.Src.Database, truncateTable.TableName)
+ j.destMeta.ClearTable(j.Dest.Database, destTableName)
+ }
+
+ return err
+}
+
+func (j *Job) handleReplacePartitions(binlog *festruct.TBinlog) error {
+ log.Infof("handle replace partitions binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+ data := binlog.GetData()
+ replacePartition, err := record.NewReplacePartitionFromJson(data)
+ if err != nil {
+ return err
+ }
+
+ if j.isBinlogCommitted(replacePartition.TableId, binlog.GetCommitSeq()) {
+ return nil
+ }
+
+ if !replacePartition.StrictRange {
+ log.Warnf("replacing partitions with non strict range is not supported yet, replace partition record: %s", string(data))
+ return j.newSnapshot(j.progress.CommitSeq)
+ }
+
+ if replacePartition.UseTempName {
+ log.Warnf("replacing partitions with use tmp name is not supported yet, replace partition record: %s", string(data))
+ return j.newSnapshot(j.progress.CommitSeq)
+ }
+
+ oldPartitions := strings.Join(replacePartition.Partitions, ",")
+ newPartitions := strings.Join(replacePartition.TempPartitions, ",")
+ log.Infof("table %s replace partitions %s with temp partitions %s",
+ replacePartition.TableName, oldPartitions, newPartitions)
+
+ partitions := replacePartition.Partitions
+ if replacePartition.UseTempName {
+ partitions = replacePartition.TempPartitions
+ }
+
+ return j.newPartialSnapshot(replacePartition.TableId, replacePartition.TableName, partitions, false)
+}
+
+func (j *Job) handleModifyPartitions(binlog *festruct.TBinlog) error {
+ log.Infof("handle modify partitions binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+ log.Warnf("modify partitions is not supported now, binlog data: %s", binlog.GetData())
+ return nil
+}
+
+// handle rename table
+func (j *Job) handleRenameTable(binlog *festruct.TBinlog) error {
+ log.Infof("handle rename table binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
+
+ data := binlog.GetData()
+ renameTable, err := record.NewRenameTableFromJson(data)
+ if err != nil {
+ return err
+ }
+
+ return j.handleRenameTableRecord(binlog.GetCommitSeq(), renameTable)
+}
+
+func (j *Job) handleRenameTableRecord(commitSeq int64, renameTable *record.RenameTable) error {
+ // don't support rename table when table sync
+ if j.SyncType == TableSync {
+ log.Warnf("rename table is not supported when table sync, consider rebuilding this job instead")
+ return xerror.Errorf(xerror.Normal, "rename table is not supported when table sync, consider rebuilding this job instead")
}
- return tableRecords, nil
-}
+ if j.isBinlogCommitted(renameTable.TableId, commitSeq) {
+ return nil
+ }
-// Table ingestBinlog
-// TODO: add check success, check ingestBinlog commitInfo
-// TODO: rewrite by use tableId
-func (j *Job) ingestBinlog(txnId int64, tableRecords []*record.TableRecord) ([]*ttypes.TTabletCommitInfo, error) {
- log.Infof("ingestBinlog, txnId: %d", txnId)
+ var destTableName string
+ if j.SyncType == TableSync {
+ destTableName = j.Dest.Table
+ } else {
+ var err error
+ destTableName, err = j.getDestNameBySrcId(renameTable.TableId)
+ if err != nil {
+ return err
+ }
+ }
- job, err := j.jobFactory.CreateJob(NewIngestContext(txnId, tableRecords), j, "IngestBinlog")
- if err != nil {
- return nil, err
+ if renameTable.NewTableName != "" && renameTable.OldTableName == "" {
+		// For compatibility with old doris versions.
+ //
+ // If we synchronize all operations accurately, then the old table name should be equal to
+ // the destination table name.
+ renameTable.OldTableName = destTableName
}
- ingestBinlogJob, ok := job.(*IngestBinlogJob)
- if !ok {
- return nil, xerror.Errorf(xerror.Normal, "invalid job type, job: %+v", job)
+ err := j.IDest.RenameTable(destTableName, renameTable)
+ if err != nil {
+ return err
}
- job.Run()
- if err := job.Error(); err != nil {
- return nil, err
+ j.destMeta.GetTables()
+ if j.progress.TableNameMapping == nil {
+ j.progress.TableNameMapping = make(map[int64]string)
}
- return ingestBinlogJob.CommitInfos(), nil
+ j.progress.TableNameMapping[renameTable.TableId] = renameTable.NewTableName
+
+ return nil
}
-// TODO: handle error by abort txn
-func (j *Job) handleUpsert(binlog *festruct.TBinlog) error {
- log.Infof("handle upsert binlog, sub sync state: %s", j.progress.SubSyncState)
+func (j *Job) handleReplaceTable(binlog *festruct.TBinlog) error {
+ log.Infof("handle replace table binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
- // inMemory will be update in state machine, but progress keep any, so progress.inMemory is also latest, well call NextSubCheckpoint don't need to upate inMemory in progress
- // TODO(IMPROVE): some steps not need all data, so we can reset some data in progress, such as RollbackTransaction only need txnId
- type inMemoryData struct {
- CommitSeq int64 `json:"commit_seq"`
- TxnId int64 `json:"txn_id"`
- DestTableIds []int64 `json:"dest_table_ids"`
- TableRecords []*record.TableRecord `json:"table_records"`
- CommitInfos []*ttypes.TTabletCommitInfo `json:"commit_infos"`
+ record, err := record.NewReplaceTableRecordFromJson(binlog.GetData())
+ if err != nil {
+ return err
}
- upateInMemory := func() error {
- if j.progress.InMemoryData == nil {
- persistData := j.progress.PersistData
- inMemoryData := &inMemoryData{}
- if err := json.Unmarshal([]byte(persistData), inMemoryData); err != nil {
- return xerror.Errorf(xerror.Normal, "unmarshal persistData failed, persistData: %s", persistData)
- }
- j.progress.InMemoryData = inMemoryData
- }
- return nil
- }
+ return j.handleReplaceTableRecord(binlog.GetCommitSeq(), record)
+}
- rollback := func(err error, inMemoryData *inMemoryData) {
- log.Errorf("need rollback, err: %+v", err)
- j.progress.NextSubCheckpoint(RollbackTransaction, inMemoryData)
+func (j *Job) handleReplaceTableRecord(commitSeq int64, record *record.ReplaceTableRecord) error {
+ if j.SyncType == TableSync {
+ log.Infof("replace table %s with fullsync in table sync, reset src table id from %d to %d, swap: %t",
+ record.OriginTableName, record.OriginTableId, record.NewTableId, record.SwapTable)
+ j.Src.TableId = record.NewTableId
+ return j.newSnapshot(commitSeq)
}
- dest := &j.Dest
- switch j.progress.SubSyncState {
- case Done:
- if binlog == nil {
- log.Errorf("binlog is nil, %+v", xerror.Errorf(xerror.Normal, "handle nil upsert binlog"))
- return nil
- }
+ if j.isBinlogCommitted(record.OriginTableId, commitSeq) {
+ return nil
+ }
- data := binlog.GetData()
- upsert, err := record.NewUpsertFromJson(data)
- if err != nil {
- return err
- }
- log.Debugf("upsert: %v", upsert)
+ toName := record.OriginTableName
+ fromName := record.NewTableName
+ if err := j.IDest.ReplaceTable(fromName, toName, record.SwapTable); err != nil {
+ return err
+ }
- // TODO(Fix)
- // commitSeq := upsert.CommitSeq
+ j.destMeta.GetTables() // update id <=> name cache
+ if j.progress.TableNameMapping == nil {
+ j.progress.TableNameMapping = make(map[int64]string)
+ }
+ if record.SwapTable {
+ // keep table mapping
+ j.progress.TableNameMapping[record.OriginTableId] = record.NewTableName
+ j.progress.TableNameMapping[record.NewTableId] = record.OriginTableName
+	} else { // non-swap replace: the origin table is dropped
+ j.progress.TableNameMapping[record.NewTableId] = record.OriginTableName
+ delete(j.progress.TableNameMapping, record.OriginTableId)
+ delete(j.progress.TableMapping, record.OriginTableId)
+ }
- // Step 1: get related tableRecords
- tableRecords, err := j.getReleatedTableRecords(upsert)
- if err != nil {
- log.Errorf("get releated table records failed, err: %+v", err)
- }
- if len(tableRecords) == 0 {
- log.Debug("no releated table records")
- return nil
- }
+ return nil
+}
- log.Debugf("tableRecords: %v", tableRecords)
- destTableIds := make([]int64, 0, len(tableRecords))
- if j.SyncType == DBSync {
- for _, tableRecord := range tableRecords {
- if destTableId, err := j.getDestTableIdBySrc(tableRecord.Id); err != nil {
- return err
- } else {
- destTableIds = append(destTableIds, destTableId)
- }
- }
- } else {
- destTableIds = append(destTableIds, j.Dest.TableId)
- }
- inMemoryData := &inMemoryData{
- CommitSeq: upsert.CommitSeq,
- DestTableIds: destTableIds,
- TableRecords: tableRecords,
- }
- j.progress.NextSubVolatile(BeginTransaction, inMemoryData)
+func (j *Job) handleModifyTableAddOrDropInvertedIndices(binlog *festruct.TBinlog) error {
+ log.Infof("handle modify table add or drop inverted indices binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
- case BeginTransaction:
- // Step 2: begin txn
- inMemoryData := j.progress.InMemoryData.(*inMemoryData)
- commitSeq := inMemoryData.CommitSeq
- log.Debugf("begin txn, dest: %v, commitSeq: %d", dest, commitSeq)
+ data := binlog.GetData()
+ modifyTableAddOrDropInvertedIndices, err := record.NewModifyTableAddOrDropInvertedIndicesFromJson(data)
+ if err != nil {
+ return err
+ }
- destRpc, err := j.rpcFactory.NewFeRpc(dest)
- if err != nil {
- return err
- }
+ return j.handleModifyTableAddOrDropInvertedIndicesRecord(binlog.GetCommitSeq(), modifyTableAddOrDropInvertedIndices)
+}
- label := j.newLabel(commitSeq)
+func (j *Job) handleModifyTableAddOrDropInvertedIndicesRecord(commitSeq int64, record *record.ModifyTableAddOrDropInvertedIndices) error {
+ if j.isBinlogCommitted(record.TableId, commitSeq) {
+ return nil
+ }
- beginTxnResp, err := destRpc.BeginTransaction(dest, label, inMemoryData.DestTableIds)
+ var destTableName string
+ if j.SyncType == TableSync {
+ destTableName = j.Dest.Table
+ } else {
+ var err error
+ destTableName, err = j.getDestNameBySrcId(record.TableId)
if err != nil {
return err
}
- log.Debugf("resp: %v", beginTxnResp)
- if beginTxnResp.GetStatus().GetStatusCode() != tstatus.TStatusCode_OK {
- return xerror.Errorf(xerror.Normal, "begin txn failed, status: %v", beginTxnResp.GetStatus())
- }
- txnId := beginTxnResp.GetTxnId()
- log.Debugf("TxnId: %d, DbId: %d", txnId, beginTxnResp.GetDbId())
-
- inMemoryData.TxnId = txnId
- j.progress.NextSubCheckpoint(IngestBinlog, inMemoryData)
-
- case IngestBinlog:
- log.Debug("ingest binlog")
- if err := upateInMemory(); err != nil {
- return err
- }
- inMemoryData := j.progress.InMemoryData.(*inMemoryData)
- tableRecords := inMemoryData.TableRecords
- txnId := inMemoryData.TxnId
+ }
- // TODO: 反查现在的状况
- // Step 3: ingest binlog
- var commitInfos []*ttypes.TTabletCommitInfo
- commitInfos, err := j.ingestBinlog(txnId, tableRecords)
- if err != nil {
- rollback(err, inMemoryData)
- } else {
- log.Debugf("commitInfos: %v", commitInfos)
- inMemoryData.CommitInfos = commitInfos
- j.progress.NextSubCheckpoint(CommitTransaction, inMemoryData)
- }
+ return j.IDest.LightningIndexChange(destTableName, record)
+}
- case CommitTransaction:
- // Step 4: commit txn
- log.Debug("commit txn")
- if err := upateInMemory(); err != nil {
- return err
- }
- inMemoryData := j.progress.InMemoryData.(*inMemoryData)
- txnId := inMemoryData.TxnId
- commitInfos := inMemoryData.CommitInfos
+func (j *Job) handleIndexChangeJob(binlog *festruct.TBinlog) error {
+ log.Infof("handle index change job binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
- destRpc, err := j.rpcFactory.NewFeRpc(dest)
- if err != nil {
- rollback(err, inMemoryData)
- break
- }
+ data := binlog.GetData()
+ indexChangeJob, err := record.NewIndexChangeJobFromJson(data)
+ if err != nil {
+ return err
+ }
- resp, err := destRpc.CommitTransaction(dest, txnId, commitInfos)
- if err != nil {
- rollback(err, inMemoryData)
- break
- }
+ return j.handleIndexChangeJobRecord(binlog.GetCommitSeq(), indexChangeJob)
+}
- if statusCode := resp.Status.GetStatusCode(); statusCode == tstatus.TStatusCode_PUBLISH_TIMEOUT {
- dest.WaitTransactionDone(txnId)
- } else if statusCode != tstatus.TStatusCode_OK {
- err := xerror.Errorf(xerror.Normal, "commit txn failed, status: %v", resp.Status)
- rollback(err, inMemoryData)
- break
- }
+func (j *Job) handleIndexChangeJobRecord(commitSeq int64, indexChangeJob *record.IndexChangeJob) error {
+ if j.isBinlogCommitted(indexChangeJob.TableId, commitSeq) {
+ return nil
+ }
- log.Infof("commit TxnId: %d resp: %v", txnId, resp)
- commitSeq := j.progress.CommitSeq
- destTableIds := inMemoryData.DestTableIds
- if j.SyncType == DBSync && len(j.progress.TableCommitSeqMap) > 0 {
- for _, tableId := range destTableIds {
- tableCommitSeq, ok := j.progress.TableCommitSeqMap[tableId]
- if !ok {
- continue
- }
+ if indexChangeJob.JobState != record.INDEX_CHANGE_JOB_STATE_FINISHED ||
+ indexChangeJob.IsDropOp {
+ log.Debugf("skip index change job binlog, job state: %s, is drop op: %t",
+ indexChangeJob.JobState, indexChangeJob.IsDropOp)
+ return nil
+ }
- if tableCommitSeq < commitSeq {
- j.progress.TableCommitSeqMap[tableId] = commitSeq
- }
- // TODO: [PERFORMANCE] remove old commit seq
- }
+ var destTableName string
+ if j.SyncType == TableSync {
+ destTableName = j.Dest.Table
+ } else {
+ destTableName = indexChangeJob.TableName
+ }
- j.progress.Persist()
- }
- j.progress.Done()
- return nil
+ return j.IDest.BuildIndex(destTableName, indexChangeJob)
+}
- case RollbackTransaction:
- log.Debugf("Rollback txn")
- // Not Step 5: just rollback txn
- if err := upateInMemory(); err != nil {
- return err
- }
+// handle alter view def
+func (j *Job) handleAlterViewDef(binlog *festruct.TBinlog) error {
+ log.Infof("handle alter view def binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
- inMemoryData := j.progress.InMemoryData.(*inMemoryData)
- txnId := inMemoryData.TxnId
- destRpc, err := j.rpcFactory.NewFeRpc(dest)
- if err != nil {
- return err
- }
+ data := binlog.GetData()
+ alterView, err := record.NewAlterViewFromJson(data)
+ if err != nil {
+ return err
+ }
+ return j.handleAlterViewDefRecord(binlog.GetCommitSeq(), alterView)
+}
- resp, err := destRpc.RollbackTransaction(dest, txnId)
- if err != nil {
- return err
- }
- if resp.Status.GetStatusCode() != tstatus.TStatusCode_OK {
- return xerror.Errorf(xerror.Normal, "rollback txn failed, status: %v", resp.Status)
- }
- log.Infof("rollback TxnId: %d resp: %v", txnId, resp)
- j.progress.Done()
+func (j *Job) handleAlterViewDefRecord(commitSeq int64, alterView *record.AlterView) error {
+ if j.isBinlogCommitted(alterView.TableId, commitSeq) {
return nil
+ }
- default:
- return xerror.Errorf(xerror.Normal, "invalid job sub sync state %d", j.progress.SubSyncState)
+ viewName, err := j.getDestNameBySrcId(alterView.TableId)
+ if err != nil {
+ return err
}
- return j.handleUpsert(binlog)
+ return j.IDest.AlterViewDef(j.Src.Database, viewName, alterView)
}
-// handleAddPartition
-func (j *Job) handleAddPartition(binlog *festruct.TBinlog) error {
- log.Infof("handle add partition binlog")
+func (j *Job) handleRenamePartition(binlog *festruct.TBinlog) error {
+ log.Infof("handle rename partition binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
data := binlog.GetData()
- addPartition, err := record.NewAddPartitionFromJson(data)
+ renamePartition, err := record.NewRenamePartitionFromJson(data)
if err != nil {
return err
}
+ return j.handleRenamePartitionRecord(binlog.GetCommitSeq(), renamePartition)
+}
+
+func (j *Job) handleRenamePartitionRecord(commitSeq int64, renamePartition *record.RenamePartition) error {
+ if j.isBinlogCommitted(renamePartition.TableId, commitSeq) {
+ return nil
+ }
- destDbName := j.Dest.Database
var destTableName string
if j.SyncType == TableSync {
destTableName = j.Dest.Table
- } else if j.SyncType == DBSync {
- destTableName, err = j.destMeta.GetTableNameById(addPartition.TableId)
+ } else {
+ var err error
+ destTableName, err = j.getDestNameBySrcId(renamePartition.TableId)
if err != nil {
return err
}
}
- // addPartitionSql = "ALTER TABLE " + sql
- addPartitionSql := fmt.Sprintf("ALTER TABLE %s.%s %s", destDbName, destTableName, addPartition.Sql)
- log.Infof("addPartitionSql: %s", addPartitionSql)
- return j.IDest.Exec(addPartitionSql)
+ newPartition := renamePartition.NewPartitionName
+ oldPartition := renamePartition.OldPartitionName
+ if oldPartition == "" {
+ log.Warnf("old partition name is empty, sync partition via partial snapshot, "+
+ "new partition: %s, partition id: %d, table id: %d, commit seq: %d",
+ newPartition, renamePartition.PartitionId, renamePartition.TableId, commitSeq)
+ replace := true
+ tableName := destTableName
+ if j.isTableSyncWithAlias() {
+ tableName = j.Src.Table
+ }
+ return j.newPartialSnapshot(renamePartition.TableId, tableName, nil, replace)
+ }
+ return j.IDest.RenamePartition(destTableName, oldPartition, newPartition)
}
-// handleDropPartition
-func (j *Job) handleDropPartition(binlog *festruct.TBinlog) error {
- log.Infof("handle drop partition binlog")
+func (j *Job) handleRenameRollup(binlog *festruct.TBinlog) error {
+ log.Infof("handle rename rollup binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
data := binlog.GetData()
- dropPartition, err := record.NewDropPartitionFromJson(data)
+ renameRollup, err := record.NewRenameRollupFromJson(data)
if err != nil {
return err
}
- destDbName := j.Dest.Database
+ return j.handleRenameRollupRecord(binlog.GetCommitSeq(), renameRollup)
+}
+
+func (j *Job) handleRenameRollupRecord(commitSeq int64, renameRollup *record.RenameRollup) error {
+ if j.isBinlogCommitted(renameRollup.TableId, commitSeq) {
+ return nil
+ }
+
var destTableName string
if j.SyncType == TableSync {
destTableName = j.Dest.Table
- } else if j.SyncType == DBSync {
- destTableName, err = j.destMeta.GetTableNameById(dropPartition.TableId)
+ } else {
+ var err error
+ destTableName, err = j.getDestNameBySrcId(renameRollup.TableId)
if err != nil {
return err
}
}
- // dropPartitionSql = "ALTER TABLE " + sql
- dropPartitionSql := fmt.Sprintf("ALTER TABLE %s.%s %s", destDbName, destTableName, dropPartition.Sql)
- log.Infof("dropPartitionSql: %s", dropPartitionSql)
- return j.IDest.Exec(dropPartitionSql)
-}
-
-// handleCreateTable
-func (j *Job) handleCreateTable(binlog *festruct.TBinlog) error {
- log.Infof("handle create table binlog")
-
- if j.SyncType != DBSync {
- return xerror.Errorf(xerror.Normal, "invalid sync type: %v", j.SyncType)
- }
-
- data := binlog.GetData()
- createTable, err := record.NewCreateTableFromJson(data)
- if err != nil {
- return err
+ newRollup := renameRollup.NewRollupName
+ oldRollup := renameRollup.OldRollupName
+ if oldRollup == "" {
+ log.Warnf("old rollup name is empty, sync rollup via partial snapshot, "+
+ "new rollup: %s, index id: %d, table id: %d, commit seq: %d",
+ newRollup, renameRollup.IndexId, renameRollup.TableId, commitSeq)
+ replace := true
+ tableName := destTableName
+ if j.isTableSyncWithAlias() {
+ tableName = j.Src.Table
+ }
+ return j.newPartialSnapshot(renameRollup.TableId, tableName, nil, replace)
}
- sql := createTable.Sql
- log.Infof("createTableSql: %s", sql)
- // HACK: for drop table
- err = j.IDest.DbExec(sql)
- j.srcMeta.GetTables()
- j.destMeta.GetTables()
- return err
+ return j.IDest.RenameRollup(destTableName, oldRollup, newRollup)
}
-// handleDropTable
-func (j *Job) handleDropTable(binlog *festruct.TBinlog) error {
- log.Infof("handle drop table binlog")
-
- if j.SyncType != DBSync {
- return xerror.Errorf(xerror.Normal, "invalid sync type: %v", j.SyncType)
- }
+func (j *Job) handleDropRollup(binlog *festruct.TBinlog) error {
+ log.Infof("handle drop rollup binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
data := binlog.GetData()
- dropTable, err := record.NewDropTableFromJson(data)
+ dropRollup, err := record.NewDropRollupFromJson(data)
if err != nil {
return err
}
- tableName := dropTable.TableName
- // depreated
- if tableName == "" {
- dirtySrcTables := j.srcMeta.DirtyGetTables()
- srcTable, ok := dirtySrcTables[dropTable.TableId]
- if !ok {
- return xerror.Errorf(xerror.Normal, "table not found, tableId: %d", dropTable.TableId)
- }
-
- tableName = srcTable.Name
- }
-
- sql := fmt.Sprintf("DROP TABLE %s FORCE", tableName)
- log.Infof("dropTableSql: %s", sql)
- err = j.IDest.DbExec(sql)
- j.srcMeta.GetTables()
- j.destMeta.GetTables()
- return err
+ return j.handleDropRollupRecord(binlog.GetCommitSeq(), dropRollup)
}
-func (j *Job) handleDummy(binlog *festruct.TBinlog) error {
- dummyCommitSeq := binlog.GetCommitSeq()
+func (j *Job) handleDropRollupRecord(commitSeq int64, dropRollup *record.DropRollup) error {
+ if j.isBinlogCommitted(dropRollup.TableId, commitSeq) {
+ return nil
+ }
- log.Infof("handle dummy binlog, need full sync. SyncType: %v, seq: %v", j.SyncType, dummyCommitSeq)
+ var destTableName string
+ if j.SyncType == TableSync {
+ destTableName = j.Dest.Table
+ } else {
+ destTableName = dropRollup.TableName
+ }
- return j.newSnapshot(dummyCommitSeq)
+ return j.IDest.DropRollup(destTableName, dropRollup.IndexName)
}
-// handleAlterJob
-func (j *Job) handleAlterJob(binlog *festruct.TBinlog) error {
- log.Infof("handle alter job binlog")
+func (j *Job) handleRecoverInfo(binlog *festruct.TBinlog) error {
+ log.Infof("handle recoverInfo binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
data := binlog.GetData()
- alterJob, err := record.NewAlterJobV2FromJson(data)
+ recoverInfo, err := record.NewRecoverInfoFromJson(data)
if err != nil {
return err
}
- if alterJob.TableName == "" {
- return xerror.Errorf(xerror.Normal, "invalid alter job, tableName: %s", alterJob.TableName)
- }
- if !alterJob.IsFinished() {
+
+ return j.handleRecoverInfoRecord(binlog.GetCommitSeq(), recoverInfo)
+}
+
+func (j *Job) handleRecoverInfoRecord(commitSeq int64, recoverInfo *record.RecoverInfo) error {
+ if j.isBinlogCommitted(recoverInfo.TableId, commitSeq) {
return nil
}
- // HACK: busy loop for success
- // TODO: Add to state machine
- for {
- // drop table dropTableSql
- // TODO: [IMPROVEMENT] use rename table instead of drop table
- var dropTableSql string
- if j.SyncType == TableSync {
- dropTableSql = fmt.Sprintf("DROP TABLE %s FORCE", j.Dest.Table)
+ if recoverInfo.IsRecoverTable() {
+ var tableName string
+ if recoverInfo.NewTableName != "" {
+ tableName = recoverInfo.NewTableName
} else {
- dropTableSql = fmt.Sprintf("DROP TABLE %s FORCE", alterJob.TableName)
- }
- log.Infof("dropTableSql: %s", dropTableSql)
-
- if err := j.destMeta.DbExec(dropTableSql); err == nil {
- break
+ tableName = recoverInfo.TableName
}
+		log.Infof("recover info for table %s, will trigger partial sync", tableName)
+ return j.newPartialSnapshot(recoverInfo.TableId, tableName, nil, true)
}
- return j.newSnapshot(j.progress.CommitSeq)
+ var partitions []string
+ if recoverInfo.NewPartitionName != "" {
+ partitions = append(partitions, recoverInfo.NewPartitionName)
+ } else {
+ partitions = append(partitions, recoverInfo.PartitionName)
+ }
+	log.Infof("recover info for partition(%s) of table %s, will trigger partial sync",
+		partitions, recoverInfo.TableName)
+	// If the source recovers multiple partitions in a row, there is a race: some
+	// recoveries might be missed because the commit seq changes after the snapshot.
+	return j.newPartialSnapshot(recoverInfo.TableId, recoverInfo.TableName, nil, true)
}
-// handleLightningSchemaChange
-func (j *Job) handleLightningSchemaChange(binlog *festruct.TBinlog) error {
- log.Infof("handle lightning schema change binlog")
+func (j *Job) handleRestoreInfo(binlog *festruct.TBinlog) error {
+ log.Infof("handle restore info binlog, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq)
data := binlog.GetData()
- lightningSchemaChange, err := record.NewModifyTableAddOrDropColumnsFromJson(data)
+ restoreInfo, err := record.NewRestoreInfoFromJson(data)
if err != nil {
return err
}
-
- log.Debugf("lightningSchemaChange %v", lightningSchemaChange)
-
- rawSql := lightningSchemaChange.RawSql
- // "rawSql": "ALTER TABLE `default_cluster:ccr`.`test_ddl` ADD COLUMN `nid1` int(11) NULL COMMENT \"\""
- // replace `default_cluster:${Src.Database}`.`test_ddl` to `test_ddl`
- sql := strings.Replace(rawSql, fmt.Sprintf("`default_cluster:%s`.", j.Src.Database), "", 1)
- log.Infof("lightningSchemaChangeSql, rawSql: %s, sql: %s", rawSql, sql)
- return j.IDest.DbExec(sql)
+ return j.handleRestoreInfoRecord(binlog.GetCommitSeq(), restoreInfo)
}
-func (j *Job) handleTruncateTable(binlog *festruct.TBinlog) error {
- log.Infof("handle truncate table binlog")
+func (j *Job) handleRestoreInfoRecord(commitSeq int64, restoreInfo *record.RestoreInfo) error {
+	if len(restoreInfo.TableInfo) != 1 {
+		// The restore covers zero or multiple tables; take a full snapshot for both table and db sync.
+		log.Warnf("restore info covers %d tables, trigger a full snapshot", len(restoreInfo.TableInfo))
+		return j.newSnapshot(commitSeq)
+	}
+
+	// Exactly one table was restored.
+	for tableId, tableName := range restoreInfo.TableInfo {
+		switch j.SyncType {
+		case TableSync:
+			log.Warnf("trigger a full snapshot for the restored table %d (%s)", tableId, tableName)
+			return j.newSnapshot(commitSeq)
+		case DBSync:
+			log.Warnf("trigger a partial snapshot for the restored table %d (%s)", tableId, tableName)
+			replace := true // replace the old data to avoid blocking reads
+			return j.newPartialSnapshot(tableId, tableName, nil, replace)
+		}
+	}
+
+	// Unreachable: TableInfo has exactly one entry.
+	return nil
+}
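+
+// handleBarrier unwraps a binlog carried inside a barrier log and dispatches it to the
+// matching handler; an empty payload is a plain barrier and is ignored.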
+func (j *Job) handleBarrier(binlog *festruct.TBinlog) error {
data := binlog.GetData()
- truncateTable, err := record.NewTruncateTableFromJson(data)
+ barrierLog, err := record.NewBarrierLogFromJson(data)
if err != nil {
return err
}
- var destTableName string
- switch j.SyncType {
- case DBSync:
- destTableName = truncateTable.TableName
- case TableSync:
- destTableName = j.Dest.Table
- default:
- return xerror.Panicf(xerror.Normal, "invalid sync type: %v", j.SyncType)
- }
-
- var sql string
- if truncateTable.RawSql == "" {
- sql = fmt.Sprintf("TRUNCATE TABLE %s", destTableName)
- } else {
- sql = fmt.Sprintf("TRUNCATE TABLE %s %s", destTableName, truncateTable.RawSql)
+ if barrierLog.Binlog == "" {
+ log.Info("handle barrier binlog, ignore it")
+ return nil
}
- log.Infof("truncateTableSql: %s", sql)
+ binlogType := festruct.TBinlogType(barrierLog.BinlogType)
+ log.Infof("handle barrier binlog with type %s, prevCommitSeq: %d, commitSeq: %d",
+ binlogType, j.progress.PrevCommitSeq, j.progress.CommitSeq)
- err = j.IDest.DbExec(sql)
- if err == nil {
- if srcTableName, err := j.srcMeta.GetTableNameById(truncateTable.TableId); err == nil {
- // if err != nil, maybe truncate table had been dropped
- j.srcMeta.ClearTable(j.Src.Database, srcTableName)
+ commitSeq := binlog.GetCommitSeq()
+ switch binlogType {
+ case festruct.TBinlogType_RENAME_TABLE:
+ renameTable, err := record.NewRenameTableFromJson(barrierLog.Binlog)
+ if err != nil {
+ return err
}
- j.destMeta.ClearTable(j.Dest.Database, destTableName)
+ return j.handleRenameTableRecord(commitSeq, renameTable)
+ case festruct.TBinlogType_RENAME_COLUMN:
+ renameColumn, err := record.NewRenameColumnFromJson(barrierLog.Binlog)
+ if err != nil {
+ return err
+ }
+ return j.handleRenameColumnRecord(commitSeq, renameColumn)
+ case festruct.TBinlogType_RENAME_PARTITION:
+ renamePartition, err := record.NewRenamePartitionFromJson(barrierLog.Binlog)
+ if err != nil {
+ return err
+ }
+ return j.handleRenamePartitionRecord(commitSeq, renamePartition)
+ case festruct.TBinlogType_RENAME_ROLLUP:
+ renameRollup, err := record.NewRenameRollupFromJson(barrierLog.Binlog)
+ if err != nil {
+ return err
+ }
+		return j.handleRenameRollupRecord(commitSeq, renameRollup)
+ case festruct.TBinlogType_DROP_ROLLUP:
+ dropRollup, err := record.NewDropRollupFromJson(barrierLog.Binlog)
+ if err != nil {
+ return err
+ }
+ return j.handleDropRollupRecord(commitSeq, dropRollup)
+ case festruct.TBinlogType_REPLACE_TABLE:
+ replaceTable, err := record.NewReplaceTableRecordFromJson(barrierLog.Binlog)
+ if err != nil {
+ return err
+ }
+ return j.handleReplaceTableRecord(commitSeq, replaceTable)
+ case festruct.TBinlogType_MODIFY_TABLE_ADD_OR_DROP_INVERTED_INDICES:
+ m, err := record.NewModifyTableAddOrDropInvertedIndicesFromJson(barrierLog.Binlog)
+ if err != nil {
+ return err
+ }
+ return j.handleModifyTableAddOrDropInvertedIndicesRecord(commitSeq, m)
+ case festruct.TBinlogType_INDEX_CHANGE_JOB:
+ job, err := record.NewIndexChangeJobFromJson(barrierLog.Binlog)
+ if err != nil {
+ return err
+ }
+ return j.handleIndexChangeJobRecord(commitSeq, job)
+ case festruct.TBinlogType_MODIFY_VIEW_DEF:
+ alterView, err := record.NewAlterViewFromJson(barrierLog.Binlog)
+ if err != nil {
+ return err
+ }
+ return j.handleAlterViewDefRecord(commitSeq, alterView)
+ case festruct.TBinlogType_MODIFY_COMMENT:
+ modifyComment, err := record.NewModifyCommentFromJson(barrierLog.Binlog)
+ if err != nil {
+ return err
+ }
+ return j.handleModifyCommentRecord(commitSeq, modifyComment)
+ case festruct.TBinlogType_RECOVER_INFO:
+ recoverInfo, err := record.NewRecoverInfoFromJson(barrierLog.Binlog)
+ if err != nil {
+ return err
+ }
+ return j.handleRecoverInfoRecord(commitSeq, recoverInfo)
+ case festruct.TBinlogType_RESTORE_INFO:
+ restoreInfo, err := record.NewRestoreInfoFromJson(barrierLog.Binlog)
+ if err != nil {
+ return err
+ }
+ return j.handleRestoreInfoRecord(commitSeq, restoreInfo)
+ case festruct.TBinlogType_BARRIER:
+ log.Info("handle barrier binlog, ignore it")
+ default:
+ return xerror.Errorf(xerror.Normal, "unknown binlog type wrapped by barrier: %d", barrierLog.BinlogType)
}
-
- return err
+ return nil
}
// return: error && bool backToRunLoop
func (j *Job) handleBinlogs(binlogs []*festruct.TBinlog) (error, bool) {
+ log.Infof("handle binlogs, binlogs size: %d", len(binlogs))
+
for _, binlog := range binlogs {
// Step 1: dispatch handle binlog
if err := j.handleBinlog(binlog); err != nil {
+ log.Errorf("handle binlog failed, prevCommitSeq: %d, commitSeq: %d, binlog type: %s, binlog data: %s",
+ j.progress.PrevCommitSeq, j.progress.CommitSeq, binlog.GetType(), binlog.GetData())
return err, false
}
+		// Step 2: if the job is no longer in incremental sync (e.g. it switched to DBPartialSync), go back to the run loop
+ if !j.isIncrementalSync() {
+ log.Debugf("job state is not incremental sync, back to run loop, job state: %s", j.progress.SyncState)
+ return nil, true
+ }
+
+ // Step 3: update progress
commitSeq := binlog.GetCommitSeq()
if j.SyncType == DBSync && j.progress.TableCommitSeqMap != nil {
- // TODO: [PERFORMANCE] use largest tableCommitSeq in memorydata to acc it
// when all table commit seq > commitSeq, it's true
reachSwitchToDBIncrementalSync := true
for _, tableCommitSeq := range j.progress.TableCommitSeqMap {
@@ -1049,15 +2783,10 @@ func (j *Job) handleBinlogs(binlogs []*festruct.TBinlog) (error, bool) {
}
}
- // Step 2: update progress to db
+ // Step 4: update progress to db
if !j.progress.IsDone() {
j.progress.Done()
}
-
- // Step 3: check job state, if not incrementalSync, break
- if !j.isIncrementalSync() {
- return nil, true
- }
}
return nil, false
}
@@ -1067,15 +2796,15 @@ func (j *Job) handleBinlog(binlog *festruct.TBinlog) error {
return xerror.Errorf(xerror.Normal, "invalid binlog: %v", binlog)
}
- log.Debugf("binlog data: %s", binlog.GetData())
+ log.Debugf("binlog type: %s, binlog data: %s", binlog.GetType(), binlog.GetData())
// Step 2: update job progress
j.progress.StartHandle(binlog.GetCommitSeq())
+ xmetrics.HandlingBinlog(j.Name, binlog.GetCommitSeq())
- // TODO: use table driven, keep this and driven, conert BinlogType to TBinlogType
switch binlog.GetType() {
case festruct.TBinlogType_UPSERT:
- return j.handleUpsert(binlog)
+ return j.handleUpsertWithRetry(binlog)
case festruct.TBinlogType_ADD_PARTITION:
return j.handleAddPartition(binlog)
case festruct.TBinlogType_CREATE_TABLE:
@@ -1088,16 +2817,44 @@ func (j *Job) handleBinlog(binlog *festruct.TBinlog) error {
return j.handleAlterJob(binlog)
case festruct.TBinlogType_MODIFY_TABLE_ADD_OR_DROP_COLUMNS:
return j.handleLightningSchemaChange(binlog)
+ case festruct.TBinlogType_RENAME_COLUMN:
+ return j.handleRenameColumn(binlog)
+ case festruct.TBinlogType_MODIFY_COMMENT:
+ return j.handleModifyComment(binlog)
case festruct.TBinlogType_DUMMY:
return j.handleDummy(binlog)
case festruct.TBinlogType_ALTER_DATABASE_PROPERTY:
- // TODO(Drogon)
+ log.Info("handle alter database property binlog, ignore it")
case festruct.TBinlogType_MODIFY_TABLE_PROPERTY:
- // TODO(Drogon)
+ log.Info("handle alter table property binlog, ignore it")
case festruct.TBinlogType_BARRIER:
- log.Info("handle barrier binlog")
+ return j.handleBarrier(binlog)
case festruct.TBinlogType_TRUNCATE_TABLE:
return j.handleTruncateTable(binlog)
+ case festruct.TBinlogType_RENAME_TABLE:
+ return j.handleRenameTable(binlog)
+ case festruct.TBinlogType_REPLACE_PARTITIONS:
+ return j.handleReplacePartitions(binlog)
+ case festruct.TBinlogType_MODIFY_PARTITIONS:
+ return j.handleModifyPartitions(binlog)
+ case festruct.TBinlogType_REPLACE_TABLE:
+ return j.handleReplaceTable(binlog)
+ case festruct.TBinlogType_MODIFY_VIEW_DEF:
+ return j.handleAlterViewDef(binlog)
+ case festruct.TBinlogType_MODIFY_TABLE_ADD_OR_DROP_INVERTED_INDICES:
+ return j.handleModifyTableAddOrDropInvertedIndices(binlog)
+ case festruct.TBinlogType_INDEX_CHANGE_JOB:
+ return j.handleIndexChangeJob(binlog)
+ case festruct.TBinlogType_RENAME_PARTITION:
+ return j.handleRenamePartition(binlog)
+ case festruct.TBinlogType_RENAME_ROLLUP:
+ return j.handleRenameRollup(binlog)
+ case festruct.TBinlogType_DROP_ROLLUP:
+ return j.handleDropRollup(binlog)
+ case festruct.TBinlogType_RECOVER_INFO:
+ return j.handleRecoverInfo(binlog)
+ case festruct.TBinlogType_RESTORE_INFO:
+ return j.handleRestoreInfo(binlog)
default:
return xerror.Errorf(xerror.Normal, "unknown binlog type: %v", binlog.GetType())
}
@@ -1110,7 +2867,7 @@ func (j *Job) recoverIncrementalSync() error {
case BinlogUpsert:
return j.handleUpsert(nil)
default:
- j.progress.Rollback()
+ j.progress.Rollback(j.SkipError)
}
return nil
@@ -1118,7 +2875,8 @@ func (j *Job) recoverIncrementalSync() error {
func (j *Job) incrementalSync() error {
if !j.progress.IsDone() {
- log.Infof("job progress is not done, state is (%s), need recover", j.progress.SubSyncState)
+ log.Infof("job progress is not done, need recover. state: %s, prevCommitSeq: %d, commitSeq: %d",
+ j.progress.SubSyncState, j.progress.PrevCommitSeq, j.progress.CommitSeq)
return j.recoverIncrementalSync()
}
@@ -1126,7 +2884,7 @@ func (j *Job) incrementalSync() error {
// Step 1: get binlog
log.Debug("start incremental sync")
src := &j.Src
- srcRpc, err := j.rpcFactory.NewFeRpc(src)
+ srcRpc, err := j.factory.NewFeRpc(src)
if err != nil {
log.Errorf("new fe rpc failed, src: %v, err: %+v", src, err)
return err
@@ -1134,12 +2892,20 @@ func (j *Job) incrementalSync() error {
// Step 2: handle all binlog
for {
+ if j.forceFullsync {
+ log.Warnf("job is forced to step fullsync by user")
+ j.forceFullsync = false
+ _ = j.newSnapshot(j.progress.CommitSeq)
+ return nil
+ }
+
+ // The CommitSeq equals PrevCommitSeq at this point.
commitSeq := j.progress.CommitSeq
log.Debugf("src: %s, commitSeq: %v", src, commitSeq)
getBinlogResp, err := srcRpc.GetBinlog(src, commitSeq)
if err != nil {
- return nil
+ return err
}
log.Debugf("resp: %v", getBinlogResp)
@@ -1157,7 +2923,8 @@ func (j *Job) incrementalSync() error {
case tstatus.TStatusCode_BINLOG_NOT_FOUND_TABLE:
return xerror.Errorf(xerror.Normal, "can't found table")
default:
- return xerror.Errorf(xerror.Normal, "invalid binlog status type: %v", status.StatusCode)
+ return xerror.Errorf(xerror.Normal, "invalid binlog status type: %v, msg: %s",
+ status.StatusCode, utils.FirstOr(status.GetErrorMsgs(), ""))
}
// Step 2.2: handle binlogs records if has job
@@ -1197,6 +2964,9 @@ func (j *Job) tableSync() error {
case TableIncrementalSync:
log.Debug("table incremental sync")
return j.incrementalSync()
+ case TablePartialSync:
+ log.Debug("table partial sync")
+ return j.partialSync()
default:
return xerror.Errorf(xerror.Normal, "unknown sync state: %v", j.progress.SyncState)
}
@@ -1208,7 +2978,6 @@ func (j *Job) dbTablesIncrementalSync() error {
return j.incrementalSync()
}
-// TODO(Drogon): impl DBSpecificTableFullSync
func (j *Job) dbSpecificTableFullSync() error {
log.Debug("db specific table full sync")
@@ -1227,6 +2996,9 @@ func (j *Job) dbSync() error {
case DBIncrementalSync:
log.Debug("db incremental sync")
return j.incrementalSync()
+ case DBPartialSync:
+ log.Debug("db partial sync")
+ return j.partialSync()
default:
return xerror.Errorf(xerror.Normal, "unknown db sync state: %v", j.progress.SyncState)
}
@@ -1250,17 +3022,19 @@ func (j *Job) sync() error {
func (j *Job) handleError(err error) error {
var xerr *xerror.XError
if !errors.As(err, &xerr) {
+ log.Errorf("convert error to xerror failed, err: %+v", err)
return nil
}
+ xmetrics.AddError(xerr)
if xerr.IsPanic() {
+ log.Errorf("job panic, job: %s, err: %+v", j.Name, err)
return err
}
- // TODO(Drogon): do more things, not only snapshot
if xerr.Category() == xerror.Meta {
- // TODO(Drogon): handle error
- j.newSnapshot(j.progress.CommitSeq)
+ log.Warnf("receive meta category error, make new snapshot, job: %s, err: %v", j.Name, err)
+ _ = j.newSnapshot(j.progress.CommitSeq)
}
return nil
}
@@ -1272,18 +3046,25 @@ func (j *Job) run() {
var panicError error
for {
+ j.updateJobStatus()
+
+ // Check maybeDeleted first: Delete marks the job deleted before closing the stop channel, so checking here covers the gap between those two steps and ensures a deleted job is always removed.
+ if j.maybeDeleted() {
+ return
+ }
+
select {
case <-j.stop:
gls.DeleteGls(gls.GoID())
log.Infof("job stopped, job: %s", j.Name)
return
+
case <-ticker.C:
+ // keep looping to print the error instead of panicking, waiting for the user to pause/stop/remove the job
if j.getJobState() != JobRunning {
break
}
- // loop to print error, not panic, waiting for user to pause/stop/remove Job
- // TODO(Drogon): Add user resume the job, so reset panicError for retry
if panicError != nil {
log.Errorf("job panic, job: %s, err: %+v", j.Name, panicError)
break
@@ -1294,7 +3075,7 @@ func (j *Job) run() {
break
}
- log.Errorf("job sync failed, job: %s, err: %+v", j.Name, err)
+ log.Warnf("job sync failed, job: %s, err: %+v", j.Name, err)
panicError = j.handleError(err)
}
}
@@ -1303,6 +3084,9 @@ func (j *Job) run() {
func (j *Job) newSnapshot(commitSeq int64) error {
log.Infof("new snapshot, commitSeq: %d", commitSeq)
+ j.progress.PartialSyncData = nil
+ j.progress.TableAliases = nil
+ j.progress.SyncId += 1
switch j.SyncType {
case TableSync:
j.progress.NextWithPersist(commitSeq, TableFullSync, BeginCreateSnapshot, "")
@@ -1317,6 +3101,58 @@ func (j *Job) newSnapshot(commitSeq int64) error {
}
}
+// Start a new partial snapshot for the given source cluster table and partitions.
+// An empty partition list means the whole table will be synced.
+//
+// If replace is true, the restore task will load data into a new table and replace the old
+// one once the restore finishes, so replace requires a whole-table partial sync.
+func (j *Job) newPartialSnapshot(tableId int64, table string, partitions []string, replace bool) error {
+ if j.SyncType == TableSync && table != j.Src.Table {
+ return xerror.Errorf(xerror.Normal,
+ "partial sync table name is not equals to the source name %s, table: %s, sync type: table", j.Src.Table, table)
+ }
+
+ if replace && len(partitions) != 0 {
+ return xerror.Errorf(xerror.Normal,
+ "partial sync with replace but partitions is not empty, table: %s, len: %d", table, len(partitions))
+ }
+
+ // The binlog at commitSeq will be skipped once the partial snapshot finishes.
+ commitSeq := j.progress.CommitSeq
+
+ syncData := &JobPartialSyncData{
+ TableId: tableId,
+ Table: table,
+ Partitions: partitions,
+ }
+ j.progress.PartialSyncData = syncData
+ j.progress.TableAliases = nil
+ j.progress.SyncId += 1
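+ // With replace, restore into an aliased table and swap it in once the restore finishes; otherwise restore the listed partitions in place.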
+ if replace {
+ alias := TableAlias(table)
+ j.progress.TableAliases = make(map[string]string)
+ j.progress.TableAliases[table] = alias
+ log.Infof("new partial snapshot, commitSeq: %d, table id: %d, table: %s, alias: %s",
+ commitSeq, tableId, table, alias)
+ } else {
+ log.Infof("new partial snapshot, commitSeq: %d, table id: %d, table: %s, partitions: %v",
+ commitSeq, tableId, table, partitions)
+ }
+
+ switch j.SyncType {
+ case TableSync:
+ j.progress.NextWithPersist(commitSeq, TablePartialSync, BeginCreateSnapshot, "")
+ return nil
+ case DBSync:
+ j.progress.NextWithPersist(commitSeq, DBPartialSync, BeginCreateSnapshot, "")
+ return nil
+ default:
+ err := xerror.Panicf(xerror.Normal, "unknown table sync type: %v", j.SyncType)
+ log.Fatalf("run %+v", err)
+ return err
+ }
+}
+
// run job
func (j *Job) Run() error {
gls.ResetGls(gls.GoID(), map[interface{}]interface{}{})
@@ -1351,8 +3187,8 @@ func (j *Job) Run() error {
// Hack: for drop table
if j.SyncType == DBSync {
- j.srcMeta.GetTables()
- j.destMeta.GetTables()
+ j.srcMeta.ClearTablesCache()
+ j.destMeta.ClearTablesCache()
}
j.run()
@@ -1366,37 +3202,23 @@ func (j *Job) desyncTable() error {
if err != nil {
return err
}
-
- desyncSql := fmt.Sprintf("ALTER TABLE %s SET (\"is_being_synced\"=\"false\")", tableName)
- log.Debugf("db exec: %s", desyncSql)
- if err := j.IDest.DbExec(desyncSql); err != nil {
- return xerror.Wrapf(err, xerror.FE, "failed tables: %s", tableName)
- }
- return nil
+ return j.IDest.DesyncTables(tableName)
}
func (j *Job) desyncDB() error {
log.Debugf("desync db")
- var failedTable string = ""
tables, err := j.destMeta.GetTables()
if err != nil {
return err
}
+ tableNames := []string{}
for _, tableMeta := range tables {
- desyncSql := fmt.Sprintf("ALTER TABLE %s SET (\"is_being_synced\"=\"false\")", tableMeta.Name)
- log.Debugf("db exec: %s", desyncSql)
- if err := j.IDest.DbExec(desyncSql); err != nil {
- failedTable += tableMeta.Name + " "
- }
- }
-
- if failedTable != "" {
- return xerror.Errorf(xerror.FE, "failed tables: %s", failedTable)
+ tableNames = append(tableNames, tableMeta.Name)
}
- return nil
+ return j.IDest.DesyncTables(tableNames...)
}
func (j *Job) Desync() error {
@@ -1407,13 +3229,79 @@ func (j *Job) Desync() error {
}
}
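+// UpdateSkipError updates the job's SkipError flag and persists it; the previous value is restored if persisting fails.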
+func (j *Job) UpdateSkipError(skipError bool) error {
+ j.lock.Lock()
+ defer j.lock.Unlock()
+
+ originSkipError := j.SkipError
+ if originSkipError == skipError {
+ return nil
+ }
+
+ j.SkipError = skipError
+ if err := j.persistJob(); err != nil {
+ j.SkipError = originSkipError
+ return err
+ } else {
+ return nil
+ }
+}
+
// stop job
func (j *Job) Stop() {
close(j.stop)
}
+// delete job
+func (j *Job) Delete() {
+ j.isDeleted.Store(true)
+ close(j.stop)
+}
+
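+// maybeDeleted reports whether the job has been marked deleted and, if so, removes its record from the db.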
+func (j *Job) maybeDeleted() bool {
+ if !j.isDeleted.Load() {
+ return false
+ }
+
+ // the job has been deleted; remove it from the db
+ log.Infof("job deleted, job: %s, remove in db", j.Name)
+ if err := j.db.RemoveJob(j.Name); err != nil {
+ log.Errorf("remove job failed, job: %s, err: %+v", j.Name, err)
+ }
+ return true
+}
+
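+// updateFrontends appends the frontends discovered from the source and destination metadata to the corresponding job specs.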
+func (j *Job) updateFrontends() error {
+ if frontends, err := j.srcMeta.GetFrontends(); err != nil {
+ log.Warnf("get src frontends failed, fe: %+v", j.Src)
+ return err
+ } else {
+ for _, frontend := range frontends {
+ j.Src.Frontends = append(j.Src.Frontends, *frontend)
+ }
+ }
+ log.Debugf("src frontends %+v", j.Src.Frontends)
+
+ if frontends, err := j.destMeta.GetFrontends(); err != nil {
+ log.Warnf("get dest frontends failed, fe: %+v", j.Dest)
+ return err
+ } else {
+ for _, frontend := range frontends {
+ j.Dest.Frontends = append(j.Dest.Frontends, *frontend)
+ }
+ }
+ log.Debugf("dest frontends %+v", j.Dest.Frontends)
+
+ return nil
+}
+
func (j *Job) FirstRun() error {
- log.Info("first run check job", zap.String("src", j.Src.String()), zap.String("dest", j.Dest.String()))
+ log.Infof("first run check job, src: %s, dest: %s", &j.Src, &j.Dest)
+
+ // Step 0: get all frontends
+ if err := j.updateFrontends(); err != nil {
+ return err
+ }
// Step 1: check fe and be binlog feature is enabled
if err := j.srcMeta.CheckBinlogFeature(); err != nil {
@@ -1479,7 +3367,7 @@ func (j *Job) FirstRun() error {
} else {
j.Dest.DbId = destDbId
}
- if j.SyncType == TableSync {
+ if j.SyncType == TableSync && !j.allowTableExists {
dest_table_exists, err := j.IDest.CheckTableExists()
if err != nil {
return err
@@ -1492,13 +3380,12 @@ func (j *Job) FirstRun() error {
return nil
}
-// HACK: temp impl
func (j *Job) GetLag() (int64, error) {
j.lock.Lock()
defer j.lock.Unlock()
srcSpec := &j.Src
- rpc, err := j.rpcFactory.NewFeRpc(srcSpec)
+ rpc, err := j.factory.NewFeRpc(srcSpec)
if err != nil {
return 0, err
}
@@ -1551,6 +3438,26 @@ func (j *Job) Resume() error {
return j.changeJobState(JobRunning)
}
+func (j *Job) ForceFullsync() {
+ log.Infof("force job %s step full sync", j.Name)
+
+ j.lock.Lock()
+ defer j.lock.Unlock()
+ j.forceFullsync = true
+}
+
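+// RawJobStatus holds the job state and progress state as int32 values so they can be read and written atomically.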
+type RawJobStatus struct {
+ state int32
+ progressState int32
+}
+
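+// updateJobStatus publishes the current job state and progress sync state via atomics so Status() can read them without acquiring the job lock.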
+func (j *Job) updateJobStatus() {
+ atomic.StoreInt32(&j.rawStatus.state, int32(j.State))
+ if j.progress != nil {
+ atomic.StoreInt32(&j.rawStatus.progressState, int32(j.progress.SyncState))
+ }
+}
+
type JobStatus struct {
Name string `json:"name"`
State string `json:"state"`
@@ -1558,15 +3465,89 @@ type JobStatus struct {
}
func (j *Job) Status() *JobStatus {
- j.lock.Lock()
- defer j.lock.Unlock()
-
- state := j.State.String()
- progress_state := j.progress.SyncState.String()
+ state := JobState(atomic.LoadInt32(&j.rawStatus.state)).String()
+ progressState := SyncState(atomic.LoadInt32(&j.rawStatus.progressState)).String()
return &JobStatus{
Name: j.Name,
State: state,
- ProgressState: progress_state,
+ ProgressState: progressState,
+ }
+}
+
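+// UpdateHostMapping merges the given private-to-public address mappings into the source and destination specs; an empty public address removes the entry, and the old mappings are restored if persisting fails.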
+func (j *Job) UpdateHostMapping(srcHostMaps, destHostMaps map[string]string) error {
+ j.lock.Lock()
+ defer j.lock.Unlock()
+
+ oldSrcHostMapping := j.Src.HostMapping
+ if j.Src.HostMapping == nil {
+ j.Src.HostMapping = make(map[string]string)
+ }
+ for private, public := range srcHostMaps {
+ if public == "" {
+ delete(j.Src.HostMapping, private)
+ } else {
+ j.Src.HostMapping[private] = public
+ }
+ }
+
+ oldDestHostMapping := j.Dest.HostMapping
+ if j.Dest.HostMapping == nil {
+ j.Dest.HostMapping = make(map[string]string)
+ }
+ for private, public := range destHostMaps {
+ if public == "" {
+ delete(j.Dest.HostMapping, private)
+ } else {
+ j.Dest.HostMapping[private] = public
+ }
+ }
+
+ if err := j.persistJob(); err != nil {
+ j.Src.HostMapping = oldSrcHostMapping
+ j.Dest.HostMapping = oldDestHostMapping
+ return err
+ }
+
+ log.Debugf("update job %s src host mapping %+v, dest host mapping: %+v", j.Name, srcHostMaps, destHostMaps)
+ return nil
+}
+
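+// The helpers below classify FE statuses by matching substrings of the returned error messages.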
+func isTxnCommitted(status *tstatus.TStatus) bool {
+ return isStatusContainsAny(status, "is already COMMITTED")
+}
+
+func isTxnNotFound(status *tstatus.TStatus) bool {
+ errMessages := status.GetErrorMsgs()
+ for _, errMessage := range errMessages {
+ // detailMessage = transaction not found
+ // or detailMessage = transaction [12356] not found
+ if strings.Contains(errMessage, "transaction not found") || regexp.MustCompile(`transaction \[\d+\] not found`).MatchString(errMessage) {
+ return true
+ }
+ }
+ return false
+}
+
+func isTxnAborted(status *tstatus.TStatus) bool {
+ return isStatusContainsAny(status, "is already aborted")
+}
+
+func isTableNotFound(status *tstatus.TStatus) bool {
+ // 1. FE FrontendServiceImpl.beginTxnImpl
+ // 2. FE FrontendServiceImpl.commitTxnImpl
+ // 3. FE Table.tryWriteLockOrMetaException
+ return isStatusContainsAny(status, "can't find table id:", "table not found", "unknown table")
+}
+
+func isStatusContainsAny(status *tstatus.TStatus, patterns ...string) bool {
+ errMessages := status.GetErrorMsgs()
+ for _, errMessage := range errMessages {
+ for _, substr := range patterns {
+ if strings.Contains(errMessage, substr) {
+ return true
+ }
+ }
}
+ return false
}
diff --git a/pkg/ccr/job_factory.go b/pkg/ccr/job_factory.go
index 326bf1ed..914657f4 100644
--- a/pkg/ccr/job_factory.go
+++ b/pkg/ccr/job_factory.go
@@ -2,8 +2,7 @@ package ccr
import "context"
-type JobFactory struct {
-}
+type JobFactory struct{}
// create job factory
func NewJobFactory() *JobFactory {
diff --git a/pkg/ccr/job_manager.go b/pkg/ccr/job_manager.go
index 6e1f4a4d..fa6e92a6 100644
--- a/pkg/ccr/job_manager.go
+++ b/pkg/ccr/job_manager.go
@@ -2,16 +2,16 @@ package ccr
import (
"encoding/json"
+ "fmt"
"sync"
"github.com/selectdb/ccr_syncer/pkg/storage"
"github.com/selectdb/ccr_syncer/pkg/xerror"
+ "github.com/selectdb/ccr_syncer/pkg/xmetrics"
log "github.com/sirupsen/logrus"
)
-const (
- ErrJobExist = "job exist"
-)
+var errJobExist = xerror.NewWithoutStack(xerror.Normal, "job exist")
// job manager is thread safety
type JobManager struct {
@@ -47,7 +47,7 @@ func (jm *JobManager) AddJob(job *Job) error {
// Step 1: check job exist
if _, ok := jm.jobs[job.Name]; ok {
- return xerror.Errorf(xerror.Normal, "%s: %s", ErrJobExist, job.Name)
+ return xerror.XWrapf(errJobExist, "job: %s", job.Name)
}
// Step 2: check job first run, mostly for dest/src fe db/table info
@@ -68,6 +68,9 @@ func (jm *JobManager) AddJob(job *Job) error {
jm.jobs[job.Name] = job
jm.runJob(job)
+ // Step 5: add metrics
+ xmetrics.AddNewJob(job.Name)
+
return nil
}
@@ -82,6 +85,7 @@ func (jm *JobManager) Recover(jobNames []string) error {
if _, ok := jm.jobs[jobName]; ok {
continue
}
+
log.Infof("recover job: %s", jobName)
if jobInfo, err := jm.db.GetJobInfo(jobName); err != nil {
@@ -107,14 +111,21 @@ func (jm *JobManager) RemoveJob(name string) error {
jm.lock.Lock()
defer jm.lock.Unlock()
+ job := jm.jobs[name]
// check job exist
- if job, ok := jm.jobs[name]; ok {
- // stop job
- job.Stop()
+ if job == nil {
+ return xerror.Errorf(xerror.Normal, "job not exist: %s", name)
+ }
+
+ // stop job
+ job.Delete()
+ if err := jm.db.RemoveJob(name); err == nil {
delete(jm.jobs, name)
- return jm.db.RemoveJob(name)
+ log.Infof("job [%s] has been successfully deleted, but it needs to wait until an isochronous point before it will completely STOP", name)
+ return nil
} else {
- return xerror.Errorf(xerror.Normal, "job not exist: %s", name)
+ log.Errorf("remove job [%s] in db failed: %+v, but job is stopped", name, err)
+ return fmt.Errorf("remove job [%s] in db failed, but job is stopped, if can resume/delete, please do it manually", name)
}
}
@@ -194,6 +205,13 @@ func (jm *JobManager) Resume(jobName string) error {
})
}
+func (jm *JobManager) ForceFullsync(jobName string) error {
+ return jm.dealJob(jobName, func(job *Job) error {
+ job.ForceFullsync()
+ return nil
+ })
+}
+
func (jm *JobManager) GetJobStatus(jobName string) (*JobStatus, error) {
jm.lock.RLock()
defer jm.lock.RUnlock()
@@ -226,3 +244,25 @@ func (jm *JobManager) ListJobs() []*JobStatus {
}
return jobs
}
+
+func (jm *JobManager) UpdateJobSkipError(jobName string, skipError bool) error {
+ jm.lock.Lock()
+ defer jm.lock.Unlock()
+
+ if job, ok := jm.jobs[jobName]; ok {
+ return job.UpdateSkipError(skipError)
+ } else {
+ return xerror.Errorf(xerror.Normal, "job not exist: %s", jobName)
+ }
+}
+
+func (jm *JobManager) UpdateHostMapping(jobName string, srcHostMapping, destHostMapping map[string]string) error {
+ jm.lock.Lock()
+ defer jm.lock.Unlock()
+
+ if job, ok := jm.jobs[jobName]; ok {
+ return job.UpdateHostMapping(srcHostMapping, destHostMapping)
+ } else {
+ return xerror.Errorf(xerror.Normal, "job not exist: %s", jobName)
+ }
+}
diff --git a/pkg/ccr/job_progress.go b/pkg/ccr/job_progress.go
index 3c9d9803..133f554e 100644
--- a/pkg/ccr/job_progress.go
+++ b/pkg/ccr/job_progress.go
@@ -7,8 +7,8 @@ import (
"github.com/selectdb/ccr_syncer/pkg/storage"
"github.com/selectdb/ccr_syncer/pkg/xerror"
+ "github.com/selectdb/ccr_syncer/pkg/xmetrics"
log "github.com/sirupsen/logrus"
- "go.uber.org/zap"
)
// TODO: rewrite all progress by two level state machine
@@ -26,10 +26,12 @@ const (
DBTablesIncrementalSync SyncState = 1
DBSpecificTableFullSync SyncState = 2
DBIncrementalSync SyncState = 3
+ DBPartialSync SyncState = 4 // sync partitions
// Table sync state machine states
TableFullSync SyncState = 500
TableIncrementalSync SyncState = 501
+ TablePartialSync SyncState = 502
// TODO: add timeout state for restart full sync
)
@@ -45,10 +47,14 @@ func (s SyncState) String() string {
return "DBSpecificTableFullSync"
case DBIncrementalSync:
return "DBIncrementalSync"
+ case DBPartialSync:
+ return "DBPartialSync"
case TableFullSync:
return "TableFullSync"
case TableIncrementalSync:
return "TableIncrementalSync"
+ case TablePartialSync:
+ return "TablePartialSync"
default:
return fmt.Sprintf("Unknown SyncState: %d", s)
}
@@ -89,6 +95,8 @@ var (
AddExtraInfo SubSyncState = SubSyncState{State: 2, BinlogType: BinlogNone}
RestoreSnapshot SubSyncState = SubSyncState{State: 3, BinlogType: BinlogNone}
PersistRestoreInfo SubSyncState = SubSyncState{State: 4, BinlogType: BinlogNone}
+ WaitBackupDone SubSyncState = SubSyncState{State: 5, BinlogType: BinlogNone}
+ WaitRestoreDone SubSyncState = SubSyncState{State: 6, BinlogType: BinlogNone}
BeginTransaction SubSyncState = SubSyncState{State: 11, BinlogType: BinlogUpsert}
IngestBinlog SubSyncState = SubSyncState{State: 12, BinlogType: BinlogUpsert}
@@ -127,6 +135,13 @@ func (s SubSyncState) String() string {
}
}
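+// JobPartialSyncData records which table, and optionally which partitions, a partial snapshot covers.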
+type JobPartialSyncData struct {
+ TableId int64 `json:"table_id"`
+ Table string `json:"table"`
+ PartitionIds []int64 `json:"partition_ids"`
+ Partitions []string `json:"partitions"`
+}
+
type JobProgress struct {
JobName string `json:"job_name"`
db storage.DB `json:"-"`
@@ -136,15 +151,36 @@ type JobProgress struct {
// Sub sync state machine states
SubSyncState SubSyncState `json:"sub_sync_state"`
- PrevCommitSeq int64 `json:"prev_commit_seq"`
- CommitSeq int64 `json:"commit_seq"`
- TableCommitSeqMap map[int64]int64 `json:"table_commit_seq_map"` // only for DBTablesIncrementalSync
- InMemoryData any `json:"-"`
- PersistData string `json:"data"` // this often for binlog or snapshot info
+ // The sync id of full/partial snapshot
+ SyncId int64 `json:"job_sync_id"`
+ // The commit seq up to which the target cluster has synced.
+ PrevCommitSeq int64 `json:"prev_commit_seq"`
+ CommitSeq int64 `json:"commit_seq"`
+ TableMapping map[int64]int64 `json:"table_mapping"`
+ // The upstream table id to name mapping, built during full sync.
+ // It is kept as a snapshot to survive renames, so it might be stale.
+ TableNameMapping map[int64]string `json:"table_name_mapping,omitempty"`
+ TableCommitSeqMap map[int64]int64 `json:"table_commit_seq_map"` // only for DBTablesIncrementalSync
+ InMemoryData any `json:"-"`
+ PersistData string `json:"data"` // this often for binlog or snapshot info
+ PartialSyncData *JobPartialSyncData `json:"partial_sync_data,omitempty"`
+
+ // The tables that need to be replaced rather than dropped during sync.
+ TableAliases map[string]string `json:"table_aliases,omitempty"`
+
+ // The shadow indexes of the pending schema changes
+ ShadowIndexes map[int64]int64 `json:"shadow_index_map,omitempty"`
+
+ // Fields recording the unix epoch time of key timepoints.
+ CreatedAt int64 `json:"created_at,omitempty"`
+ FullSyncStartAt int64 `json:"full_sync_start_at,omitempty"`
+ IncrementalSyncStartAt int64 `json:"incremental_sync_start_at,omitempty"`
+ IngestBinlogAt int64 `json:"ingest_binlog_at,omitempty"`
}
func (j *JobProgress) String() string {
- return fmt.Sprintf("JobProgress{JobName: %s, SyncState: %s, SubSyncState: %s, CommitSeq: %d, TableCommitSeqMap: %v, InMemoryData: %v, PersistData: %s}", j.JobName, j.SyncState, j.SubSyncState, j.CommitSeq, j.TableCommitSeqMap, j.InMemoryData, j.PersistData)
+ // Limit InMemoryData and PersistData to 64 characters in the formatted output.
+ return fmt.Sprintf("JobProgress{JobName: %s, SyncState: %s, SubSyncState: %s, CommitSeq: %d, TableCommitSeqMap: %v, InMemoryData: %.64v, PersistData: %.64s}", j.JobName, j.SyncState, j.SubSyncState, j.CommitSeq, j.TableCommitSeqMap, j.InMemoryData, j.PersistData)
}
func NewJobProgress(jobName string, syncType SyncType, db storage.DB) *JobProgress {
@@ -158,13 +194,23 @@ func NewJobProgress(jobName string, syncType SyncType, db storage.DB) *JobProgre
JobName: jobName,
db: db,
+ SyncId: time.Now().Unix(),
SyncState: syncState,
SubSyncState: BeginCreateSnapshot,
CommitSeq: 0,
+ TableMapping: nil,
TableCommitSeqMap: nil,
InMemoryData: nil,
PersistData: "",
+ PartialSyncData: nil,
+ TableAliases: nil,
+ ShadowIndexes: nil,
+
+ CreatedAt: time.Now().Unix(),
+ FullSyncStartAt: 0,
+ IncrementalSyncStartAt: 0,
+ IngestBinlogAt: 0,
}
}
@@ -176,7 +222,7 @@ func NewJobProgressFromJson(jobName string, db storage.DB) (*JobProgress, error)
for i := 0; i < 3; i++ {
jsonData, err = db.GetProgress(jobName)
if err != nil {
- log.Error("get job progress failed", zap.String("job", jobName), zap.Error(err))
+ log.Errorf("get job progress failed, error: %+v", err)
continue
}
break
@@ -195,8 +241,17 @@ func NewJobProgressFromJson(jobName string, db storage.DB) (*JobProgress, error)
}
}
+// GetTableId get table id by table name from TableNameMapping
+func (j *JobProgress) GetTableId(tableName string) (int64, bool) {
+ for tableId, table := range j.TableNameMapping {
+ if table == tableName {
+ return tableId, true
+ }
+ }
+ return 0, false
+}
+
func (j *JobProgress) StartHandle(commitSeq int64) {
- j.PrevCommitSeq = j.CommitSeq
j.CommitSeq = commitSeq
j.Persist()
@@ -229,6 +284,10 @@ func _convertToPersistData(persistData any) string {
// Persist is checkpint, next state only get it from persistData
func (j *JobProgress) NextSubCheckpoint(subSyncState SubSyncState, persistData any) {
+ if subSyncState == IngestBinlog {
+ j.IngestBinlogAt = time.Now().Unix()
+ }
+
j.SubSyncState = subSyncState
j.PersistData = _convertToPersistData(persistData)
@@ -247,8 +306,24 @@ func (j *JobProgress) CommitNextSubWithPersist(commitSeq int64, subSyncState Sub
j.Persist()
}
+// Switch to a new sync state.
+//
+// PrevCommitSeq is also set to commitSeq if the sub sync state is Done.
func (j *JobProgress) NextWithPersist(commitSeq int64, syncState SyncState, subSyncState SubSyncState, persistData string) {
+ if subSyncState == BeginCreateSnapshot && (syncState == TableFullSync || syncState == DBFullSync) {
+ j.FullSyncStartAt = time.Now().Unix()
+ j.IncrementalSyncStartAt = 0
+ j.IngestBinlogAt = 0
+ } else if subSyncState == Done && (syncState == TableIncrementalSync || syncState == DBIncrementalSync) {
+ j.IncrementalSyncStartAt = time.Now().Unix()
+ j.IngestBinlogAt = 0
+ }
+
j.CommitSeq = commitSeq
+ if subSyncState == Done {
+ j.PrevCommitSeq = commitSeq
+ }
+
j.SyncState = syncState
j.SubSyncState = subSyncState
j.PersistData = persistData
@@ -257,7 +332,7 @@ func (j *JobProgress) NextWithPersist(commitSeq int64, syncState SyncState, subS
j.Persist()
}
-func (j *JobProgress) IsDone() bool { return j.SubSyncState == Done }
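+// IsDone reports whether the current binlog has been fully handled: the sub sync state is Done and PrevCommitSeq has caught up with CommitSeq.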
+func (j *JobProgress) IsDone() bool { return j.SubSyncState == Done && j.PrevCommitSeq == j.CommitSeq }
// TODO(Drogon): check reset some fields
func (j *JobProgress) Done() {
@@ -266,29 +341,36 @@ func (j *JobProgress) Done() {
j.SubSyncState = Done
j.PrevCommitSeq = j.CommitSeq
+ xmetrics.ConsumeBinlog(j.JobName, j.PrevCommitSeq)
+
j.Persist()
}
-func (j *JobProgress) Rollback() {
+func (j *JobProgress) Rollback(skipError bool) {
log.Debugf("job %s step rollback", j.JobName)
j.SubSyncState = Done
- j.CommitSeq = j.PrevCommitSeq
+ // On rollback, reset CommitSeq to PrevCommitSeq so the failed binlog is retried.
+ // With skipError, keep CommitSeq so the failed binlog is treated as consumed and skipped.
+ if !skipError {
+ j.CommitSeq = j.PrevCommitSeq
+ }
+ xmetrics.Rollback(j.JobName, j.PrevCommitSeq)
j.Persist()
}
// write progress to db, busy loop until success
// TODO: add timeout check
func (j *JobProgress) Persist() {
- log.Debugf("update job progress: %s", j)
+ log.Trace("update job progress")
for {
// Step 1: to json
// TODO: fix to json error
jsonBytes, err := json.Marshal(j)
if err != nil {
- log.Error("parse job progress failed", zap.String("job", j.JobName), zap.Error(err))
+ log.Errorf("parse job progress failed, error: %+v", err)
time.Sleep(UPDATE_JOB_PROGRESS_DURATION)
continue
}
@@ -296,7 +378,7 @@ func (j *JobProgress) Persist() {
// Step 2: write to db
err = j.db.UpdateProgress(j.JobName, string(jsonBytes))
if err != nil {
- log.Error("update job progress failed", zap.String("job", j.JobName), zap.Error(err))
+ log.Errorf("update job progress failed, error: %+v", err)
time.Sleep(UPDATE_JOB_PROGRESS_DURATION)
continue
}
@@ -304,5 +386,6 @@ func (j *JobProgress) Persist() {
break
}
- log.Debugf("update job progress done: %s", j)
+ log.Tracef("update job progress done, state: %s, subState: %s, commitSeq: %d, prevCommitSeq: %d",
+ j.SyncState, j.SubSyncState, j.CommitSeq, j.PrevCommitSeq)
}
diff --git a/pkg/ccr/job_progress_test.go b/pkg/ccr/job_progress_test.go
index db1c511e..2db519d2 100644
--- a/pkg/ccr/job_progress_test.go
+++ b/pkg/ccr/job_progress_test.go
@@ -14,6 +14,20 @@ func init() {
log.SetOutput(io.Discard)
}
+func deepEqual(got, expect string) bool {
+ var v1, v2 interface{}
+ err := json.Unmarshal([]byte(got), &v1)
+ if err != nil {
+ return false
+ }
+
+ err = json.Unmarshal([]byte(expect), &v2)
+ if err != nil {
+ return false
+ }
+ return reflect.DeepEqual(v1, v2)
+}
+
func TestJobProgress_MarshalJSON(t *testing.T) {
type fields struct {
JobName string
@@ -22,15 +36,17 @@ func TestJobProgress_MarshalJSON(t *testing.T) {
SubSyncState SubSyncState
PrevCommitSeq int64
CommitSeq int64
+ TableMapping map[int64]int64
TransactionId int64
TableCommitSeqMap map[int64]int64
InMemoryData any
PersistData string
+ TableAliases map[string]string
}
tests := []struct {
name string
fields fields
- want []byte
+ want string
wantErr bool
}{
{
@@ -45,8 +61,27 @@ func TestJobProgress_MarshalJSON(t *testing.T) {
TableCommitSeqMap: map[int64]int64{1: 2},
InMemoryData: nil,
PersistData: "test-data",
+ TableAliases: map[string]string{"table": "alias"},
},
- want: []byte(`{"job_name":"test-job","sync_state":500,"sub_sync_state":{"state":0,"binlog_type":-1},"prev_commit_seq":0,"commit_seq":1,"table_commit_seq_map":{"1":2},"data":"test-data"}`),
+ want: `{
+ "job_name": "test-job",
+ "sync_state": 500,
+ "sub_sync_state": {
+ "state": 0,
+ "binlog_type": -1
+ },
+ "job_sync_id":0,
+ "prev_commit_seq": 0,
+ "commit_seq": 1,
+ "table_mapping": null,
+ "table_commit_seq_map": {
+ "1": 2
+ },
+ "data": "test-data",
+ "table_aliases": {
+ "table": "alias"
+ }
+}`,
wantErr: false,
},
}
@@ -62,13 +97,14 @@ func TestJobProgress_MarshalJSON(t *testing.T) {
TableCommitSeqMap: tt.fields.TableCommitSeqMap,
InMemoryData: tt.fields.InMemoryData,
PersistData: tt.fields.PersistData,
+ TableAliases: tt.fields.TableAliases,
}
got, err := json.Marshal(jp)
if (err != nil) != tt.wantErr {
t.Errorf("JobProgress.MarshalJSON() error = %v, wantErr %v", err, tt.wantErr)
return
}
- if !reflect.DeepEqual(got, tt.want) {
+ if !deepEqual(string(got), tt.want) {
t.Errorf("JobProgress.MarshalJSON() = %v, want %v", string(got), string(tt.want))
}
})
diff --git a/pkg/ccr/job_test.go b/pkg/ccr/job_test.go
deleted file mode 100644
index 3fddaa8e..00000000
--- a/pkg/ccr/job_test.go
+++ /dev/null
@@ -1,1357 +0,0 @@
-package ccr
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "testing"
-
- "github.com/selectdb/ccr_syncer/pkg/ccr/base"
- "github.com/selectdb/ccr_syncer/pkg/ccr/record"
- rpc "github.com/selectdb/ccr_syncer/pkg/rpc"
- bestruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/backendservice"
- festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
- "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status"
- ttypes "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types"
- "github.com/selectdb/ccr_syncer/pkg/test_util"
- "github.com/selectdb/ccr_syncer/pkg/xerror"
- "github.com/tidwall/btree"
- "go.uber.org/mock/gomock"
-)
-
-const (
- backendBaseId = int64(0xdeadbeef * 10)
- tableBaseId = int64(23330)
- dbBaseId = int64(114514)
-)
-
-var (
- dbSrcSpec base.Spec
- dbDestSpec base.Spec
- tblSrcSpec base.Spec
- tblDestSpec base.Spec
-)
-
-func init() {
- dbSrcSpec = base.Spec{
- Frontend: base.Frontend{
- Host: "localhost",
- Port: "9030",
- ThriftPort: "9020",
- },
- User: "root",
- Password: "",
- Database: "src_db_case",
- DbId: dbBaseId,
- Table: "",
- }
- dbDestSpec = base.Spec{
- Frontend: base.Frontend{
- Host: "localhost",
- Port: "9030",
- ThriftPort: "9020",
- },
- User: "root",
- Password: "",
- Database: "dest_db_case",
- DbId: dbBaseId,
- Table: "",
- }
- tblSrcSpec = base.Spec{
- Frontend: base.Frontend{
- Host: "localhost",
- Port: "9030",
- ThriftPort: "9020",
- },
- User: "root",
- Password: "",
- Database: "src_tbl_case",
- DbId: dbBaseId,
- Table: fmt.Sprint(tableBaseId),
- TableId: tableBaseId,
- }
- tblDestSpec = base.Spec{
- Frontend: base.Frontend{
- Host: "localhost",
- Port: "9030",
- ThriftPort: "9020",
- },
- User: "root",
- Password: "",
- Database: "dest_tbl_case",
- DbId: dbBaseId,
- Table: fmt.Sprint(tableBaseId),
- TableId: tableBaseId,
- }
-}
-
-func getPartitionBaseId(tableId int64) int64 {
- return tableId * 10
-}
-
-func getIndexBaseId(partitionId int64) int64 {
- return partitionId * 10
-}
-
-func getTabletBaseId(indexId int64) int64 {
- return indexId * 10
-}
-
-func getReplicaBaseId(indexId int64) int64 {
- return indexId * 100
-}
-
-type BinlogImpl struct {
- CommitSeq int64
- Timestamp int64
- Type festruct.TBinlogType
- DbId int64
-}
-
-func newTestBinlog(binlogType festruct.TBinlogType, tableIds []int64) *festruct.TBinlog {
- binlogImpl := BinlogImpl{
- CommitSeq: 114,
- Timestamp: 514,
- Type: binlogType,
- DbId: 114514,
- }
- binlog := &festruct.TBinlog{
- CommitSeq: &binlogImpl.CommitSeq,
- Timestamp: &binlogImpl.Timestamp,
- Type: &binlogImpl.Type,
- DbId: &binlogImpl.DbId,
- TableIds: tableIds,
- }
-
- return binlog
-}
-
-func newMeta(spec *base.Spec, backends *map[int64]*base.Backend) *DatabaseMeta {
- var tableIds []int64
- if spec.Table == "" {
- tableIds = make([]int64, 0, 3)
- for i := 0; i < 3; i++ {
- tableIds = append(tableIds, tableBaseId+int64(i))
- }
- } else {
- tableIds = make([]int64, 0, 1)
- tableIds = append(tableIds, spec.TableId)
- }
-
- dbMeta := newDatabaseMeta(spec.DbId)
- for _, tableId := range tableIds {
- tblMeta := newTableMeta(tableId)
- tblMeta.DatabaseMeta = dbMeta
-
- partitionId := getPartitionBaseId(tableId)
- partitionMeta := newPartitionMeta(partitionId)
- partitionMeta.TableMeta = tblMeta
- tblMeta.PartitionIdMap[partitionId] = partitionMeta
- tblMeta.PartitionRangeMap[fmt.Sprint(partitionId)] = partitionMeta
-
- indexId := getIndexBaseId(partitionId)
- indexMeta := newIndexMeta(indexId)
- indexMeta.PartitionMeta = partitionMeta
- partitionMeta.IndexIdMap[indexId] = indexMeta
- partitionMeta.IndexNameMap[indexMeta.Name] = indexMeta
-
- tabletId := getTabletBaseId(indexId)
- tabletMeta := newTabletMeta(tabletId)
- tabletMeta.IndexMeta = indexMeta
- tabletMeta.ReplicaMetas = indexMeta.ReplicaMetas
- indexMeta.TabletMetas.Set(tabletId, tabletMeta)
-
- replicaBaseId := getReplicaBaseId(indexId)
- backendNum := len(*backends)
- backendIds := make([]int64, 0, backendNum)
- for backendId := range *backends {
- backendIds = append(backendIds, backendId)
- }
- for i := 0; i < backendNum; i++ {
- replicaId := replicaBaseId + int64(i)
- replicaMeta := newReplicaMeta(replicaId)
- replicaMeta.TabletMeta = tabletMeta
- replicaMeta.TabletId = tabletId
- replicaMeta.BackendId = backendIds[replicaId%int64(backendNum)]
- indexMeta.ReplicaMetas.Set(replicaId, replicaMeta)
- }
- dbMeta.Tables[tableId] = tblMeta
- }
-
- return dbMeta
-}
-
-func newDatabaseMeta(dbId int64) *DatabaseMeta {
- return &DatabaseMeta{
- Id: dbId,
- Tables: make(map[int64]*TableMeta),
- }
-}
-
-func newTableMeta(tableId int64) *TableMeta {
- return &TableMeta{
- Id: tableId,
- Name: fmt.Sprint(tableId),
- PartitionIdMap: make(map[int64]*PartitionMeta),
- PartitionRangeMap: make(map[string]*PartitionMeta),
- }
-}
-
-func newPartitionMeta(partitionId int64) *PartitionMeta {
- return &PartitionMeta{
- Id: partitionId,
- Name: fmt.Sprint(partitionId),
- Key: fmt.Sprint(partitionId),
- Range: fmt.Sprint(partitionId),
- IndexIdMap: make(map[int64]*IndexMeta),
- IndexNameMap: make(map[string]*IndexMeta),
- }
-}
-
-func newIndexMeta(indexId int64) *IndexMeta {
- return &IndexMeta{
- Id: indexId,
- Name: fmt.Sprint(indexId),
- TabletMetas: btree.NewMap[int64, *TabletMeta](degree),
- ReplicaMetas: btree.NewMap[int64, *ReplicaMeta](degree),
- }
-}
-
-func newTabletMeta(tabletId int64) *TabletMeta {
- return &TabletMeta{
- Id: tabletId,
- }
-}
-
-func newReplicaMeta(replicaId int64) *ReplicaMeta {
- return &ReplicaMeta{
- Id: replicaId,
- }
-}
-
-func newBackendMap(backendNum int) map[int64]*base.Backend {
- backendMap := make(map[int64]*base.Backend)
- for i := 0; i < backendNum; i++ {
- backendId := backendBaseId + int64(i)
- backendMap[backendId] = &base.Backend{
- Id: backendId,
- Host: "localhost",
- HeartbeatPort: 0xbeef,
- BePort: 0xbeef,
- HttpPort: 0xbeef,
- BrpcPort: 0xbeef,
- }
- }
-
- return backendMap
-}
-
-type UpsertContext struct {
- context.Context
- CommitSeq int64
- DbId int64
- TableId int64
- TxnId int64
- Version int64
- PartitionId int64
- IndexId int64
- TabletId int64
-}
-
-func newUpsertData(ctx context.Context) (string, error) {
- upsertContext, ok := ctx.(*UpsertContext)
- if !ok {
- return "", xerror.Errorf(xerror.Normal, "invalid context type: %T", ctx)
- }
-
- dataMap := make(map[string]interface{})
- dataMap["commitSeq"] = upsertContext.CommitSeq
- dataMap["txnId"] = upsertContext.TxnId
- dataMap["timeStamp"] = 514
- dataMap["label"] = "insert_cca56f22e3624ab2_90b6b4ac06b44360"
- dataMap["dbId"] = upsertContext.DbId
- tableMap := make(map[string]interface{})
- dataMap["tableRecords"] = tableMap
-
- recordMap := make(map[string]interface{})
-
- partitionRecords := make([]map[string]interface{}, 0, 1)
- partitionRecord := make(map[string]interface{})
- partitionRecord["partitionId"] = upsertContext.PartitionId
- partitionRecord["range"] = fmt.Sprint(upsertContext.PartitionId)
- partitionRecord["version"] = upsertContext.Version
- partitionRecords = append(partitionRecords, partitionRecord)
- recordMap["partitionRecords"] = partitionRecords
-
- indexRecords := make([]int64, 0, 1)
- indexRecords = append(indexRecords, upsertContext.IndexId)
- recordMap["indexIds"] = indexRecords
-
- tableMap[fmt.Sprint(upsertContext.TableId)] = recordMap
-
- if data, err := json.Marshal(dataMap); err != nil {
- return "", err
- } else {
- return string(data), nil
- }
-}
-
-type inMemoryData struct {
- CommitSeq int64 `json:"commit_seq"`
- TxnId int64 `json:"txn_id"`
- DestTableIds []int64 `json:"dest_table_ids"`
- TableRecords []*record.TableRecord `json:"table_records"`
- CommitInfos []*ttypes.TTabletCommitInfo `json:"commit_infos"`
-}
-
-func upateInMemory(jobProgress *JobProgress) error {
- persistData := jobProgress.PersistData
- inMemoryData := &inMemoryData{}
- if err := json.Unmarshal([]byte(persistData), inMemoryData); err != nil {
- return xerror.Errorf(xerror.Normal, "unmarshal persistData failed, persistData: %s", persistData)
- }
- jobProgress.InMemoryData = inMemoryData
- return nil
-}
-
-func TestHandleUpsertInTableSync(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- // init test data
- txnId := int64(114514233)
- commitSeq := int64(114233514)
- version := int64(233114514)
- srcPartitionId := getPartitionBaseId(tblSrcSpec.TableId)
- srcIndexId := getIndexBaseId(srcPartitionId)
- srcTabletId := getTabletBaseId(srcIndexId)
- destPartitionId := getPartitionBaseId(tblSrcSpec.TableId)
- destIndexId := getIndexBaseId(destPartitionId)
- destTabletId := getTabletBaseId(destIndexId)
-
- backendMap := newBackendMap(3)
- srcMeta := newMeta(&tblSrcSpec, &backendMap)
- destMeta := newMeta(&tblDestSpec, &backendMap)
-
- // init db_mock
- db := test_util.NewMockDB(ctrl)
- db.EXPECT().IsJobExist("Test").Return(false, nil)
- db.EXPECT().UpdateProgress("Test", gomock.Any()).DoAndReturn(
- func(_ string, progressJson string) error {
- var jobProgress JobProgress
- if err := json.Unmarshal([]byte(progressJson), &jobProgress); err != nil {
- t.Error(err)
- }
- if err := upateInMemory(&jobProgress); err != nil {
- t.Error(err)
- }
-
- inMemoryData := jobProgress.InMemoryData.(*inMemoryData)
- if inMemoryData.TxnId != txnId {
- t.Errorf("txnId missmatch: expect %d, but get %d", txnId, inMemoryData.TxnId)
- }
- return nil
- })
-
- db.EXPECT().UpdateProgress("Test", gomock.Any()).Return(nil).Times(2)
-
- // init factory
- rpcFactory := NewMockIRpcFactory(ctrl)
- metaFactory := NewMockMetaerFactory(ctrl)
- factory := NewFactory(rpcFactory, metaFactory, base.NewSpecerFactory())
-
- // init rpcFactory
- rpcFactory.EXPECT().NewFeRpc(&tblDestSpec).DoAndReturn(func(_ *base.Spec) (rpc.IFeRpc, error) {
- mockFeRpc := NewMockIFeRpc(ctrl)
- tableIds := make([]int64, 0, 1)
- tableIds = append(tableIds, tblDestSpec.TableId)
- mockFeRpc.EXPECT().BeginTransaction(&tblDestSpec, gomock.Any(), tableIds).Return(
- &festruct.TBeginTxnResult_{
- Status: &status.TStatus{
- StatusCode: status.TStatusCode_OK,
- ErrorMsgs: nil,
- },
- TxnId: &txnId,
- JobStatus: nil,
- DbId: &tblDestSpec.DbId,
- }, nil)
- return mockFeRpc, nil
- })
- rpcFactory.EXPECT().NewFeRpc(&tblDestSpec).DoAndReturn(func(_ *base.Spec) (rpc.IFeRpc, error) {
- mockFeRpc := NewMockIFeRpc(ctrl)
- mockFeRpc.EXPECT().CommitTransaction(&tblDestSpec, txnId, gomock.Any()).Return(
- &festruct.TCommitTxnResult_{
- Status: &status.TStatus{
- StatusCode: status.TStatusCode_OK,
- ErrorMsgs: nil,
- },
- }, nil)
- return mockFeRpc, nil
- })
- rpcFactory.EXPECT().NewBeRpc(gomock.Any()).DoAndReturn(func(_ *base.Backend) (rpc.IBeRpc, error) {
- mockBeRpc := NewMockIBeRpc(ctrl)
- mockBeRpc.EXPECT().IngestBinlog(gomock.Any()).DoAndReturn(
- func(req *bestruct.TIngestBinlogRequest) (*bestruct.TIngestBinlogResult_, error) {
- if req.GetTxnId() != txnId {
- t.Errorf("txnId is mismatch: %d, need %d", req.GetTxnId(), txnId)
- } else if req.GetRemoteTabletId() != srcTabletId {
- t.Errorf("remote tabletId mismatch: %d, need %d", req.GetRemoteTabletId(), srcTabletId)
- } else if req.GetBinlogVersion() != version {
- t.Errorf("version mismatch: %d, need %d", req.GetBinlogVersion(), version)
- } else if req.GetRemoteHost() != "localhost" {
- t.Errorf("remote host mismatch: %s, need localhost", req.GetRemoteHost())
- } else if req.GetPartitionId() != destPartitionId {
- t.Errorf("partitionId mismatch: %d, need %d", req.GetPartitionId(), destPartitionId)
- } else if req.GetLocalTabletId() != destTabletId {
- t.Errorf("local tabletId mismatch: %d, need %d", req.GetLocalTabletId(), destTabletId)
- }
-
- return &bestruct.TIngestBinlogResult_{
- Status: &status.TStatus{
- StatusCode: status.TStatusCode_OK,
- ErrorMsgs: nil,
- },
- }, nil
- })
-
- return mockBeRpc, nil
- }).Times(3)
-
- // init metaFactory
- metaFactory.EXPECT().NewMeta(&tblSrcSpec).DoAndReturn(func(_ *base.Spec) Metaer {
- mockMeta := NewMockMetaer(ctrl)
-
- mockMeta.EXPECT().GetBackendMap().Return(backendMap, nil)
- mockMeta.EXPECT().GetPartitionRangeMap(tblSrcSpec.TableId).DoAndReturn(
- func(tableId int64) (map[string]*PartitionMeta, error) {
- return srcMeta.Tables[tableId].PartitionRangeMap, nil
- })
- mockMeta.EXPECT().GetIndexIdMap(tblSrcSpec.TableId, srcPartitionId).DoAndReturn(
- func(tableId int64, partitionId int64) (map[int64]*IndexMeta, error) {
- return srcMeta.Tables[tableId].PartitionIdMap[partitionId].IndexIdMap, nil
- })
- mockMeta.EXPECT().GetTablets(tblSrcSpec.TableId, srcPartitionId, srcIndexId).DoAndReturn(
- func(tableId int64, partitionId int64, indexId int64) (*btree.Map[int64, *TabletMeta], error) {
- return srcMeta.Tables[tableId].PartitionIdMap[partitionId].IndexIdMap[indexId].TabletMetas, nil
- })
-
- return mockMeta
- })
- metaFactory.EXPECT().NewMeta(&tblDestSpec).DoAndReturn(func(_ *base.Spec) Metaer {
- mockMeta := NewMockMetaer(ctrl)
- mockMeta.EXPECT().GetBackendMap().Return(backendMap, nil)
- mockMeta.EXPECT().GetPartitionRangeMap(tblDestSpec.TableId).DoAndReturn(
- func(tableId int64) (map[string]*PartitionMeta, error) {
- return destMeta.Tables[tableId].PartitionRangeMap, nil
- })
- mockMeta.EXPECT().GetPartitionIdByRange(tblDestSpec.TableId, fmt.Sprint(destPartitionId)).DoAndReturn(
- func(tableId int64, partitionRange string) (int64, error) {
- return destMeta.Tables[tableId].PartitionRangeMap[partitionRange].Id, nil
- })
- mockMeta.EXPECT().GetIndexNameMap(tblDestSpec.TableId, destPartitionId).DoAndReturn(
- func(tableId int64, partitionId int64) (map[string]*IndexMeta, error) {
- return destMeta.Tables[tableId].PartitionIdMap[partitionId].IndexNameMap, nil
- })
- mockMeta.EXPECT().GetTablets(tblDestSpec.TableId, destPartitionId, destIndexId).DoAndReturn(
- func(tableId int64, partitionId int64, indexId int64) (*btree.Map[int64, *TabletMeta], error) {
- return destMeta.Tables[tableId].PartitionIdMap[partitionId].IndexIdMap[indexId].TabletMetas, nil
- })
- return mockMeta
- })
-
- // init job
- ctx := NewJobContext(tblSrcSpec, tblDestSpec, db, factory)
- job, err := NewJobFromService("Test", ctx)
- if err != nil {
- t.Error(err)
- }
- job.progress = NewJobProgress("Test", job.SyncType, db)
- job.progress.SyncState = TableIncrementalSync
- job.progress.SubSyncState = Done
-
- // init binlog
- tableIds := make([]int64, 0, 1)
- tableIds = append(tableIds, tblSrcSpec.TableId)
- binlog := newTestBinlog(festruct.TBinlogType_UPSERT, tableIds)
- upsertContext := &UpsertContext{
- Context: context.Background(),
- CommitSeq: commitSeq,
- DbId: tblSrcSpec.DbId,
- TableId: tblSrcSpec.TableId,
- TxnId: txnId,
- Version: version,
- PartitionId: srcPartitionId,
- IndexId: srcIndexId,
- TabletId: srcTabletId,
- }
- if data, err := newUpsertData(upsertContext); err != nil {
- t.Error(err)
- } else {
- binlog.SetData(&data)
- }
-
- if err := job.handleUpsert(binlog); err != nil {
- t.Error(err)
- }
-}
-
-func TestHandleUpsertInDbSync(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- // init test data
- txnId := int64(114514233)
- commitSeq := int64(114233514)
- version := int64(233114514)
- srcPartitionId := getPartitionBaseId(tableBaseId)
- srcIndexId := getIndexBaseId(srcPartitionId)
- srcTabletId := getTabletBaseId(srcIndexId)
- destPartitionId := getPartitionBaseId(tableBaseId)
- destIndexId := getIndexBaseId(destPartitionId)
- destTabletId := getTabletBaseId(destIndexId)
-
- backendMap := newBackendMap(3)
- srcMeta := newMeta(&dbSrcSpec, &backendMap)
- destMeta := newMeta(&dbDestSpec, &backendMap)
-
- // init db_mock
- db := test_util.NewMockDB(ctrl)
- db.EXPECT().IsJobExist("Test").Return(false, nil)
- db.EXPECT().UpdateProgress("Test", gomock.Any()).DoAndReturn(
- func(_ string, progressJson string) error {
- var jobProgress JobProgress
- if err := json.Unmarshal([]byte(progressJson), &jobProgress); err != nil {
- t.Error(err)
- }
- if err := upateInMemory(&jobProgress); err != nil {
- t.Error(err)
- }
-
- inMemoryData := jobProgress.InMemoryData.(*inMemoryData)
- if inMemoryData.TxnId != txnId {
- t.Errorf("txnId missmatch: expect %d, but get %d", txnId, inMemoryData.TxnId)
- }
- return nil
- })
-
- db.EXPECT().UpdateProgress("Test", gomock.Any()).Return(nil).Times(2)
-
- // init factory
- rpcFactory := NewMockIRpcFactory(ctrl)
- metaFactory := NewMockMetaerFactory(ctrl)
- factory := NewFactory(rpcFactory, metaFactory, base.NewSpecerFactory())
-
- // init rpcFactory
- rpcFactory.EXPECT().NewFeRpc(&dbDestSpec).DoAndReturn(func(_ *base.Spec) (rpc.IFeRpc, error) {
- mockFeRpc := NewMockIFeRpc(ctrl)
- tableIds := make([]int64, 0, 1)
- tableIds = append(tableIds, tableBaseId)
- mockFeRpc.EXPECT().BeginTransaction(&dbDestSpec, gomock.Any(), tableIds).Return(
- &festruct.TBeginTxnResult_{
- Status: &status.TStatus{
- StatusCode: status.TStatusCode_OK,
- ErrorMsgs: nil,
- },
- TxnId: &txnId,
- JobStatus: nil,
- DbId: &dbDestSpec.DbId,
- }, nil)
- return mockFeRpc, nil
- })
- rpcFactory.EXPECT().NewFeRpc(&dbDestSpec).DoAndReturn(func(_ *base.Spec) (rpc.IFeRpc, error) {
- mockFeRpc := NewMockIFeRpc(ctrl)
- mockFeRpc.EXPECT().CommitTransaction(&dbDestSpec, txnId, gomock.Any()).Return(
- &festruct.TCommitTxnResult_{
- Status: &status.TStatus{
- StatusCode: status.TStatusCode_OK,
- ErrorMsgs: nil,
- },
- }, nil)
- return mockFeRpc, nil
- })
- rpcFactory.EXPECT().NewBeRpc(gomock.Any()).DoAndReturn(func(_ *base.Backend) (rpc.IBeRpc, error) {
- mockBeRpc := NewMockIBeRpc(ctrl)
- mockBeRpc.EXPECT().IngestBinlog(gomock.Any()).DoAndReturn(
- func(req *bestruct.TIngestBinlogRequest) (*bestruct.TIngestBinlogResult_, error) {
- if req.GetTxnId() != txnId {
- t.Errorf("txnId is mismatch: %d, need %d", req.GetTxnId(), txnId)
- } else if req.GetRemoteTabletId() != srcTabletId {
- t.Errorf("remote tabletId mismatch: %d, need %d", req.GetRemoteTabletId(), srcTabletId)
- } else if req.GetBinlogVersion() != version {
- t.Errorf("version mismatch: %d, need %d", req.GetBinlogVersion(), version)
- } else if req.GetRemoteHost() != "localhost" {
- t.Errorf("remote host mismatch: %s, need localhost", req.GetRemoteHost())
- } else if req.GetPartitionId() != destPartitionId {
- t.Errorf("partitionId mismatch: %d, need %d", req.GetPartitionId(), destPartitionId)
- } else if req.GetLocalTabletId() != destTabletId {
- t.Errorf("local tabletId mismatch: %d, need %d", req.GetLocalTabletId(), destTabletId)
- }
-
- return &bestruct.TIngestBinlogResult_{
- Status: &status.TStatus{
- StatusCode: status.TStatusCode_OK,
- ErrorMsgs: nil,
- },
- }, nil
- })
-
- return mockBeRpc, nil
- }).Times(3)
-
- // init metaFactory
- metaFactory.EXPECT().NewMeta(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) Metaer {
- mockMeta := NewMockMetaer(ctrl)
-
- mockMeta.EXPECT().GetBackendMap().Return(backendMap, nil)
- mockMeta.EXPECT().GetTableNameById(tableBaseId).Return(fmt.Sprint(tableBaseId), nil)
- mockMeta.EXPECT().GetPartitionRangeMap(tableBaseId).DoAndReturn(
- func(tableId int64) (map[string]*PartitionMeta, error) {
- return srcMeta.Tables[tableId].PartitionRangeMap, nil
- })
- mockMeta.EXPECT().GetIndexIdMap(tableBaseId, srcPartitionId).DoAndReturn(
- func(tableId int64, partitionId int64) (map[int64]*IndexMeta, error) {
- return srcMeta.Tables[tableId].PartitionIdMap[partitionId].IndexIdMap, nil
- })
- mockMeta.EXPECT().GetTablets(tableBaseId, srcPartitionId, srcIndexId).DoAndReturn(
- func(tableId int64, partitionId int64, indexId int64) (*btree.Map[int64, *TabletMeta], error) {
- return srcMeta.Tables[tableId].PartitionIdMap[partitionId].IndexIdMap[indexId].TabletMetas, nil
- })
-
- return mockMeta
- })
- metaFactory.EXPECT().NewMeta(&dbDestSpec).DoAndReturn(func(_ *base.Spec) Metaer {
- mockMeta := NewMockMetaer(ctrl)
- mockMeta.EXPECT().GetBackendMap().Return(backendMap, nil)
- mockMeta.EXPECT().GetTableId(fmt.Sprint(tableBaseId)).Return(tableBaseId, nil)
- mockMeta.EXPECT().GetPartitionRangeMap(tblDestSpec.TableId).DoAndReturn(
- func(tableId int64) (map[string]*PartitionMeta, error) {
- return destMeta.Tables[tableId].PartitionRangeMap, nil
- })
- mockMeta.EXPECT().GetPartitionIdByRange(tblDestSpec.TableId, fmt.Sprint(destPartitionId)).DoAndReturn(
- func(tableId int64, partitionRange string) (int64, error) {
- return destMeta.Tables[tableId].PartitionRangeMap[partitionRange].Id, nil
- })
- mockMeta.EXPECT().GetIndexNameMap(tblDestSpec.TableId, destPartitionId).DoAndReturn(
- func(tableId int64, partitionId int64) (map[string]*IndexMeta, error) {
- return destMeta.Tables[tableId].PartitionIdMap[partitionId].IndexNameMap, nil
- })
- mockMeta.EXPECT().GetTablets(tblDestSpec.TableId, destPartitionId, destIndexId).DoAndReturn(
- func(tableId int64, partitionId int64, indexId int64) (*btree.Map[int64, *TabletMeta], error) {
- return destMeta.Tables[tableId].PartitionIdMap[partitionId].IndexIdMap[indexId].TabletMetas, nil
- })
- return mockMeta
- })
-
- // init job
- ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory)
- job, err := NewJobFromService("Test", ctx)
- if err != nil {
- t.Error(err)
- }
- job.progress = NewJobProgress("Test", job.SyncType, db)
- job.progress.SyncState = DBIncrementalSync
- job.progress.SubSyncState = Done
-
- // init binlog
- tableIds := make([]int64, 0, 1)
- tableIds = append(tableIds, tableBaseId)
- binlog := newTestBinlog(festruct.TBinlogType_UPSERT, tableIds)
- upsertContext := &UpsertContext{
- Context: context.Background(),
- CommitSeq: commitSeq,
- DbId: dbSrcSpec.DbId,
- TableId: tableBaseId,
- TxnId: txnId,
- Version: version,
- PartitionId: srcPartitionId,
- IndexId: srcIndexId,
- TabletId: srcTabletId,
- }
- if data, err := newUpsertData(upsertContext); err != nil {
- t.Error(err)
- } else {
- binlog.SetData(&data)
- }
-
- if err := job.handleUpsert(binlog); err != nil {
- t.Error(err)
- }
-}
-
-func TestHandleAddPartitionInTableSync(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- // init data
- testSql := "ADD PARTITION `zero_to_five` VALUES [(\"0\"), (\"5\"))(\"version_info\" \u003d \"1\");"
-
- // init db_mock
- db := test_util.NewMockDB(ctrl)
- db.EXPECT().IsJobExist("Test").Return(false, nil)
-
- // init factory
- iSpecFactory := NewMockSpecerFactory(ctrl)
- factory := NewFactory(rpc.NewRpcFactory(), NewMetaFactory(), iSpecFactory)
-
- // init iSpecFactory
- iSpecFactory.EXPECT().NewSpecer(&tblSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
- iSpecFactory.EXPECT().NewSpecer(&tblDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- fullSql := fmt.Sprintf("ALTER TABLE %s.%s %s", tblDestSpec.Database, tblDestSpec.Table, testSql)
- mockISpec.EXPECT().Exec(fullSql).Return(nil)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
-
- // init job
- ctx := NewJobContext(tblSrcSpec, tblDestSpec, db, factory)
- job, err := NewJobFromService("Test", ctx)
- if err != nil {
- t.Error(err)
- }
-
- // init binlog
- tableIds := make([]int64, 0, 1)
- tableIds = append(tableIds, tblSrcSpec.TableId)
- binlog := newTestBinlog(festruct.TBinlogType_ADD_PARTITION, tableIds)
- dataMap := make(map[string]interface{})
- dataMap["tableId"] = tblSrcSpec.TableId
- dataMap["sql"] = testSql
- if data, err := json.Marshal(dataMap); err != nil {
- t.Error(err)
- } else {
- dataStr := string(data)
- binlog.SetData(&dataStr)
- }
-
- // test begin
- if err := job.handleAddPartition(binlog); err != nil {
- t.Error(err)
- }
-}
-
-func TestHandleAddPartitionInDbSync(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- // init data
- testSql := "ADD PARTITION `zero_to_five` VALUES [(\"0\"), (\"5\"))(\"version_info\" \u003d \"1\");"
-
- // init db_mock
- db := test_util.NewMockDB(ctrl)
- db.EXPECT().IsJobExist("Test").Return(false, nil)
-
- // init factory
- metaFactory := NewMockMetaerFactory(ctrl)
- iSpecFactory := NewMockSpecerFactory(ctrl)
- factory := NewFactory(rpc.NewRpcFactory(), metaFactory, iSpecFactory)
-
- // init metaFactory
- metaFactory.EXPECT().NewMeta(&dbSrcSpec).Return(NewMockMetaer(ctrl))
- metaFactory.EXPECT().NewMeta(&dbDestSpec).DoAndReturn(func(_ *base.Spec) Metaer {
- mockMeta := NewMockMetaer(ctrl)
- mockMeta.EXPECT().GetTableNameById(tableBaseId).Return(fmt.Sprint(tableBaseId), nil)
- return mockMeta
- })
-
- // init iSpecFactory
- iSpecFactory.EXPECT().NewSpecer(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
- iSpecFactory.EXPECT().NewSpecer(&dbDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- fullSql := fmt.Sprintf("ALTER TABLE %s.%s %s", dbDestSpec.Database, fmt.Sprint(tableBaseId), testSql)
- mockISpec.EXPECT().Exec(fullSql).Return(nil)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
-
- // init job
- ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory)
- job, err := NewJobFromService("Test", ctx)
- if err != nil {
- t.Error(err)
- }
-
- // init binlog
- tableIds := make([]int64, 0, 1)
- tableIds = append(tableIds, tableBaseId)
- binlog := newTestBinlog(festruct.TBinlogType_ADD_PARTITION, tableIds)
- dataMap := make(map[string]interface{})
- dataMap["tableId"] = tableBaseId
- dataMap["sql"] = testSql
- if data, err := json.Marshal(dataMap); err != nil {
- t.Error(err)
- } else {
- dataStr := string(data)
- binlog.SetData(&dataStr)
- }
-
- // test begin
- if err := job.handleAddPartition(binlog); err != nil {
- t.Error(err)
- }
-}
-
-func TestHandleDropPartitionInTableSync(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- // init data
- testSql := "DROP PARTITION `zero_to_five`"
-
- // init db_mock
- db := test_util.NewMockDB(ctrl)
- db.EXPECT().IsJobExist("Test").Return(false, nil)
-
- // init factory
- iSpecFactory := NewMockSpecerFactory(ctrl)
- factory := NewFactory(rpc.NewRpcFactory(), NewMetaFactory(), iSpecFactory)
-
- // init iSpecFactory
- iSpecFactory.EXPECT().NewSpecer(&tblSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
- iSpecFactory.EXPECT().NewSpecer(&tblDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- fullSql := fmt.Sprintf("ALTER TABLE %s.%s %s", tblDestSpec.Database, tblDestSpec.Table, testSql)
- mockISpec.EXPECT().Exec(fullSql).Return(nil)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
-
- // init job
- ctx := NewJobContext(tblSrcSpec, tblDestSpec, db, factory)
- job, err := NewJobFromService("Test", ctx)
- if err != nil {
- t.Error(err)
- }
-
- // init binlog
- tableIds := make([]int64, 0, 1)
- tableIds = append(tableIds, tblSrcSpec.TableId)
- binlog := newTestBinlog(festruct.TBinlogType_ADD_PARTITION, tableIds)
- dataMap := make(map[string]interface{})
- dataMap["tableId"] = tblSrcSpec.TableId
- dataMap["sql"] = testSql
- if data, err := json.Marshal(dataMap); err != nil {
- t.Error(err)
- } else {
- dataStr := string(data)
- binlog.SetData(&dataStr)
- }
-
- // test begin
- if err := job.handleDropPartition(binlog); err != nil {
- t.Error(err)
- }
-}
-
-func TestHandleDropPartitionInDbSync(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- // init data
- testSql := "DROP PARTITION `zero_to_five`"
-
- // init db_mock
- db := test_util.NewMockDB(ctrl)
- db.EXPECT().IsJobExist("Test").Return(false, nil)
-
- // init factory
- metaFactory := NewMockMetaerFactory(ctrl)
- iSpecFactory := NewMockSpecerFactory(ctrl)
- factory := NewFactory(rpc.NewRpcFactory(), metaFactory, iSpecFactory)
-
- // init metaFactory
- metaFactory.EXPECT().NewMeta(&dbSrcSpec).Return(NewMockMetaer(ctrl))
- metaFactory.EXPECT().NewMeta(&dbDestSpec).DoAndReturn(func(_ *base.Spec) Metaer {
- mockMeta := NewMockMetaer(ctrl)
- mockMeta.EXPECT().GetTableNameById(tableBaseId).Return(fmt.Sprint(tableBaseId), nil)
- return mockMeta
- })
-
- // init iSpecFactory
- iSpecFactory.EXPECT().NewSpecer(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
- iSpecFactory.EXPECT().NewSpecer(&dbDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- fullSql := fmt.Sprintf("ALTER TABLE %s.%s %s", dbDestSpec.Database, fmt.Sprint(tableBaseId), testSql)
- mockISpec.EXPECT().Exec(fullSql).Return(nil)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
-
- // init job
- ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory)
- job, err := NewJobFromService("Test", ctx)
- if err != nil {
- t.Error(err)
- }
-
- // init binlog
- tableIds := make([]int64, 0, 1)
- tableIds = append(tableIds, tableBaseId)
- binlog := newTestBinlog(festruct.TBinlogType_ADD_PARTITION, tableIds)
- dataMap := make(map[string]interface{})
- dataMap["tableId"] = tableBaseId
- dataMap["sql"] = testSql
- if data, err := json.Marshal(dataMap); err != nil {
- t.Error(err)
- } else {
- dataStr := string(data)
- binlog.SetData(&dataStr)
- }
-
- // test begin
- if err := job.handleAddPartition(binlog); err != nil {
- t.Error(err)
- }
-}
-
-func TestHandleCreateTable(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- // init data
- testSql := "A CREATE TABLE SQL"
-
- // init db_mock
- db := test_util.NewMockDB(ctrl)
- db.EXPECT().IsJobExist("Test").Return(false, nil)
-
- // init factory
- metaFactory := NewMockMetaerFactory(ctrl)
- iSpecFactory := NewMockSpecerFactory(ctrl)
- factory := NewFactory(rpc.NewRpcFactory(), metaFactory, iSpecFactory)
-
- // init metaFactory
- metaFactory.EXPECT().NewMeta(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) Metaer {
- mockMeta := NewMockMetaer(ctrl)
- mockMeta.EXPECT().GetTables().Return(make(map[int64]*TableMeta), nil)
- return mockMeta
- })
- metaFactory.EXPECT().NewMeta(&dbDestSpec).DoAndReturn(func(_ *base.Spec) Metaer {
- mockMeta := NewMockMetaer(ctrl)
- mockMeta.EXPECT().GetTables().Return(make(map[int64]*TableMeta), nil)
- return mockMeta
- })
-
- // init iSpecFactory
- iSpecFactory.EXPECT().NewSpecer(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
- iSpecFactory.EXPECT().NewSpecer(&dbDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- mockISpec.EXPECT().DbExec(testSql).Return(nil)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
-
- // init job
- ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory)
- job, err := NewJobFromService("Test", ctx)
- if err != nil {
- t.Error(err)
- }
-
- // init binlog
- tableIds := make([]int64, 0, 1)
- tableIds = append(tableIds, tableBaseId)
- binlog := newTestBinlog(festruct.TBinlogType_ADD_PARTITION, tableIds)
- dataMap := make(map[string]interface{})
- dataMap["dbId"] = dbSrcSpec.DbId
- dataMap["tableId"] = tableBaseId
- dataMap["sql"] = testSql
- if data, err := json.Marshal(dataMap); err != nil {
- t.Error(err)
- } else {
- dataStr := string(data)
- binlog.SetData(&dataStr)
- }
-
- // test begin
- if err := job.handleCreateTable(binlog); err != nil {
- t.Error(err)
- }
-}
-
-func TestHandleDropTable(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- // init data
- testSql := "A DROP TABLE SQL"
-
- // init db_mock
- db := test_util.NewMockDB(ctrl)
- db.EXPECT().IsJobExist("Test").Return(false, nil)
-
- // init factory
- metaFactory := NewMockMetaerFactory(ctrl)
- iSpecFactory := NewMockSpecerFactory(ctrl)
- factory := NewFactory(rpc.NewRpcFactory(), metaFactory, iSpecFactory)
-
- // init metaFactory
- metaFactory.EXPECT().NewMeta(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) Metaer {
- mockMeta := NewMockMetaer(ctrl)
- mockMeta.EXPECT().GetTables().Return(make(map[int64]*TableMeta), nil)
- return mockMeta
- })
- metaFactory.EXPECT().NewMeta(&dbDestSpec).DoAndReturn(func(_ *base.Spec) Metaer {
- mockMeta := NewMockMetaer(ctrl)
- mockMeta.EXPECT().GetTables().Return(make(map[int64]*TableMeta), nil)
- return mockMeta
- })
-
- // init iSpecFactory
- iSpecFactory.EXPECT().NewSpecer(&dbSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
- iSpecFactory.EXPECT().NewSpecer(&dbDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- dropSql := fmt.Sprintf("DROP TABLE %v FORCE", tableBaseId)
- mockISpec.EXPECT().DbExec(dropSql).Return(nil)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
-
- // init job
- ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory)
- job, err := NewJobFromService("Test", ctx)
- if err != nil {
- t.Error(err)
- }
-
- // init binlog
- tableIds := make([]int64, 0, 1)
- tableIds = append(tableIds, tableBaseId)
- binlog := newTestBinlog(festruct.TBinlogType_ADD_PARTITION, tableIds)
- dataMap := make(map[string]interface{})
- dataMap["dbId"] = dbSrcSpec.DbId
- dataMap["tableId"] = tableBaseId
- dataMap["tableName"] = fmt.Sprint(tableBaseId)
- dataMap["rawSql"] = testSql
- if data, err := json.Marshal(dataMap); err != nil {
- t.Error(err)
- } else {
- dataStr := string(data)
- binlog.SetData(&dataStr)
- }
-
- // test begin
- if err := job.handleDropTable(binlog); err != nil {
- t.Error(err)
- }
-}
-
-func TestHandleDummyInTableSync(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- // init data
- dummyCommitSeq := int64(114514)
-
- // init db_mock
- db := test_util.NewMockDB(ctrl)
- db.EXPECT().IsJobExist("Test").Return(false, nil)
- db.EXPECT().UpdateProgress("Test", gomock.Any()).DoAndReturn(
- func(_ string, progressJson string) error {
- var jobProgress JobProgress
- if err := json.Unmarshal([]byte(progressJson), &jobProgress); err != nil {
- t.Error(err)
- } else if jobProgress.CommitSeq != dummyCommitSeq {
- t.Errorf("UnExpect CommitSeq %v, need %v", jobProgress.CommitSeq, dummyCommitSeq)
- } else if jobProgress.SyncState != TableFullSync {
- t.Errorf("UnExpect SyncState %v, need %v", jobProgress.SyncState, TableFullSync)
- } else if jobProgress.SubSyncState != BeginCreateSnapshot {
- t.Errorf("UnExpect SubSyncState %v, need %v", jobProgress.SubSyncState, BeginCreateSnapshot)
- }
- return nil
- })
-
- // init factory
- factory := NewFactory(rpc.NewRpcFactory(), NewMetaFactory(), base.NewSpecerFactory())
-
- // init job
- ctx := NewJobContext(tblSrcSpec, tblDestSpec, db, factory)
- job, err := NewJobFromService("Test", ctx)
- if err != nil {
- t.Error(err)
- }
- job.progress = NewJobProgress("Test", job.SyncType, db)
-
- // init binlog
- binlog := newTestBinlog(festruct.TBinlogType_DUMMY, nil)
- binlog.SetCommitSeq(&dummyCommitSeq)
-
- // test begin
- if err := job.handleDummy(binlog); err != nil {
- t.Error(err)
- }
-}
-
-func TestHandleDummyInDbSync(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- // init data
- dummyCommitSeq := int64(114514)
-
- // init db_mock
- db := test_util.NewMockDB(ctrl)
- db.EXPECT().IsJobExist("Test").Return(false, nil)
- db.EXPECT().UpdateProgress("Test", gomock.Any()).DoAndReturn(
- func(_ string, progressJson string) error {
- var jobProgress JobProgress
- if err := json.Unmarshal([]byte(progressJson), &jobProgress); err != nil {
- t.Error(err)
- } else if jobProgress.CommitSeq != dummyCommitSeq {
- t.Errorf("UnExpect CommitSeq %v, need %v", jobProgress.CommitSeq, dummyCommitSeq)
- } else if jobProgress.SyncState != DBFullSync {
- t.Errorf("UnExpect SyncState %v, need %v", jobProgress.SyncState, DBFullSync)
- } else if jobProgress.SubSyncState != BeginCreateSnapshot {
- t.Errorf("UnExpect SubSyncState %v, need %v", jobProgress.SubSyncState, BeginCreateSnapshot)
- }
- return nil
- })
-
- // init factory
- factory := NewFactory(rpc.NewRpcFactory(), NewMetaFactory(), base.NewSpecerFactory())
-
- // init job
- ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory)
- job, err := NewJobFromService("Test", ctx)
- if err != nil {
- t.Error(err)
- }
- job.progress = NewJobProgress("Test", job.SyncType, db)
-
- // init binlog
- binlog := newTestBinlog(festruct.TBinlogType_DUMMY, nil)
- binlog.SetCommitSeq(&dummyCommitSeq)
-
- // test begin
- if err := job.handleDummy(binlog); err != nil {
- t.Error(err)
- }
-}
-
-func TestHandleAlterJobInTableSync(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- // init data
- commitSeq := int64(233114514)
- alterType := "UNUSED_TYPE"
- jobId := int64(114514233)
- jobState := "FINISHED"
- rawSql := "A blank SQL"
-
- // init db_mock
- db := test_util.NewMockDB(ctrl)
- db.EXPECT().IsJobExist("Test").Return(false, nil)
- db.EXPECT().UpdateProgress("Test", gomock.Any()).DoAndReturn(
- func(_ string, progressJson string) error {
- var jobProgress JobProgress
- if err := json.Unmarshal([]byte(progressJson), &jobProgress); err != nil {
- t.Error(err)
- } else if jobProgress.CommitSeq != commitSeq {
- t.Errorf("UnExpect CommitSeq %v, need %v", jobProgress.CommitSeq, commitSeq)
- } else if jobProgress.SyncState != TableFullSync {
- t.Errorf("UnExpect SyncState %v, need %v", jobProgress.SyncState, TableFullSync)
- } else if jobProgress.SubSyncState != BeginCreateSnapshot {
- t.Errorf("UnExpect SubSyncState %v, need %v", jobProgress.SubSyncState, BeginCreateSnapshot)
- }
- return nil
- })
-
- // init factory
- metaFactory := NewMockMetaerFactory(ctrl)
- factory := NewFactory(rpc.NewRpcFactory(), metaFactory, base.NewSpecerFactory())
-
- // init metaFactory
- metaFactory.EXPECT().NewMeta(&tblSrcSpec).Return(NewMockMetaer(ctrl))
- metaFactory.EXPECT().NewMeta(&tblDestSpec).DoAndReturn(func(_ *base.Spec) Metaer {
- mockMeta := NewMockMetaer(ctrl)
- dropSql := fmt.Sprintf("DROP TABLE %s FORCE", tblDestSpec.Table)
- mockMeta.EXPECT().DbExec(dropSql).Return(nil)
- return mockMeta
- })
-
- // init job
- ctx := NewJobContext(tblSrcSpec, tblDestSpec, db, factory)
- job, err := NewJobFromService("Test", ctx)
- if err != nil {
- t.Error(err)
- }
- job.progress = NewJobProgress("Test", job.SyncType, db)
- job.progress.CommitSeq = commitSeq
-
- // init binlog
- binlog := newTestBinlog(festruct.TBinlogType_ALTER_JOB, nil)
- dataMap := make(map[string]interface{})
- dataMap["type"] = alterType
- dataMap["dbId"] = tblSrcSpec.DbId
- dataMap["tableId"] = tblSrcSpec.TableId
- dataMap["tableName"] = fmt.Sprint(tblSrcSpec.Table)
- dataMap["jobId"] = jobId
- dataMap["jobState"] = jobState
- dataMap["rawSql"] = rawSql
- if data, err := json.Marshal(dataMap); err != nil {
- t.Error(err)
- } else {
- dataStr := string(data)
- binlog.SetData(&dataStr)
- }
-
- // test begin
- if err := job.handleAlterJob(binlog); err != nil {
- t.Error(err)
- }
-}
-
-func TestHandleAlterJobInDbSync(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- // init data
- commitSeq := int64(233114514)
- alterType := "UNUSED_TYPE"
- jobId := int64(114514233)
- jobState := "FINISHED"
- rawSql := "A blank SQL"
-
- // init db_mock
- db := test_util.NewMockDB(ctrl)
- db.EXPECT().IsJobExist("Test").Return(false, nil)
- db.EXPECT().UpdateProgress("Test", gomock.Any()).DoAndReturn(
- func(_ string, progressJson string) error {
- var jobProgress JobProgress
- if err := json.Unmarshal([]byte(progressJson), &jobProgress); err != nil {
- t.Error(err)
- } else if jobProgress.CommitSeq != commitSeq {
- t.Errorf("UnExpect CommitSeq %v, need %v", jobProgress.CommitSeq, commitSeq)
- } else if jobProgress.SyncState != DBFullSync {
- t.Errorf("UnExpect SyncState %v, need %v", jobProgress.SyncState, DBFullSync)
- } else if jobProgress.SubSyncState != BeginCreateSnapshot {
- t.Errorf("UnExpect SubSyncState %v, need %v", jobProgress.SubSyncState, BeginCreateSnapshot)
- }
- return nil
- })
-
- // init factory
- metaFactory := NewMockMetaerFactory(ctrl)
- factory := NewFactory(rpc.NewRpcFactory(), metaFactory, base.NewSpecerFactory())
-
- // init metaFactory
- metaFactory.EXPECT().NewMeta(&dbSrcSpec).Return(NewMockMetaer(ctrl))
- metaFactory.EXPECT().NewMeta(&dbDestSpec).DoAndReturn(func(_ *base.Spec) Metaer {
- mockMeta := NewMockMetaer(ctrl)
- dropSql := fmt.Sprintf("DROP TABLE %s FORCE", fmt.Sprint(tableBaseId))
- mockMeta.EXPECT().DbExec(dropSql).Return(nil)
- return mockMeta
- })
-
- // init job
- ctx := NewJobContext(dbSrcSpec, dbDestSpec, db, factory)
- job, err := NewJobFromService("Test", ctx)
- if err != nil {
- t.Error(err)
- }
- job.progress = NewJobProgress("Test", job.SyncType, db)
- job.progress.CommitSeq = commitSeq
-
- // init binlog
- binlog := newTestBinlog(festruct.TBinlogType_ALTER_JOB, nil)
- dataMap := make(map[string]interface{})
- dataMap["type"] = alterType
- dataMap["dbId"] = dbSrcSpec.DbId
- dataMap["tableId"] = tableBaseId
- dataMap["tableName"] = fmt.Sprint(tableBaseId)
- dataMap["jobId"] = jobId
- dataMap["jobState"] = jobState
- dataMap["rawSql"] = rawSql
- if data, err := json.Marshal(dataMap); err != nil {
- t.Error(err)
- } else {
- dataStr := string(data)
- binlog.SetData(&dataStr)
- }
-
- // test begin
- if err := job.handleAlterJob(binlog); err != nil {
- t.Error(err)
- }
-}
-
-func TestHandleLightningSchemaChange(t *testing.T) {
- ctrl := gomock.NewController(t)
- defer ctrl.Finish()
-
- // init data
- testSql := fmt.Sprintf("`default_cluster:%s`.`%s` a test sql", tblSrcSpec.Database, tblSrcSpec.Table)
-
- // init db_mock
- db := test_util.NewMockDB(ctrl)
- db.EXPECT().IsJobExist("Test").Return(false, nil)
-
- // init factory
- iSpecFactory := NewMockSpecerFactory(ctrl)
- factory := NewFactory(rpc.NewRpcFactory(), NewMetaFactory(), iSpecFactory)
-
- // init iSpecFactory
- iSpecFactory.EXPECT().NewSpecer(&tblSrcSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
- iSpecFactory.EXPECT().NewSpecer(&tblDestSpec).DoAndReturn(func(_ *base.Spec) base.Specer {
- mockISpec := NewMockSpecer(ctrl)
- execSql := fmt.Sprintf("`%s` a test sql", tblSrcSpec.Table)
- mockISpec.EXPECT().DbExec(execSql).Return(nil)
- mockISpec.EXPECT().Valid().Return(nil)
- return mockISpec
- })
-
- // init job
- ctx := NewJobContext(tblSrcSpec, tblDestSpec, db, factory)
- job, err := NewJobFromService("Test", ctx)
- if err != nil {
- t.Error(err)
- }
- job.progress = NewJobProgress("Test", job.SyncType, db)
-
- // init binlog
- binlog := newTestBinlog(festruct.TBinlogType_MODIFY_TABLE_ADD_OR_DROP_COLUMNS, nil)
- dataMap := make(map[string]interface{})
- dataMap["dbId"] = tblSrcSpec.DbId
- dataMap["tableId"] = tblSrcSpec.TableId
- dataMap["rawSql"] = testSql
- if data, err := json.Marshal(dataMap); err != nil {
- t.Error(err)
- } else {
- dataStr := string(data)
- binlog.SetData(&dataStr)
- }
-
- // test begin
- if err := job.handleLightningSchemaChange(binlog); err != nil {
- t.Error(err)
- }
-}
diff --git a/pkg/ccr/label.go b/pkg/ccr/label.go
new file mode 100644
index 00000000..43a1c4a7
--- /dev/null
+++ b/pkg/ccr/label.go
@@ -0,0 +1,33 @@
+package ccr
+
+import (
+ "fmt"
+ "time"
+)
+
+// snapshot name format "ccrs_${ccr_name}_${sync_id}"
+func NewSnapshotLabelPrefix(ccrName string, syncId int64) string {
+ return fmt.Sprintf("ccrs_%s_%d", ccrName, syncId)
+}
+
+// snapshot name format "ccrp_${ccr_name}_${sync_id}"
+func NewPartialSnapshotLabelPrefix(ccrName string, syncId int64) string {
+ return fmt.Sprintf("ccrp_%s_%d", ccrName, syncId)
+}
+
+func NewLabelWithTs(prefix string) string {
+ return fmt.Sprintf("%s_%d", prefix, time.Now().Unix())
+}
+
+func NewRestoreLabel(snapshotName string) string {
+ if snapshotName == "" {
+ return ""
+ }
+
+ // append the current unix timestamp (seconds)
+ return fmt.Sprintf("%s_r_%d", snapshotName, time.Now().Unix())
+}
+
+func TableAlias(tableName string) string {
+ return fmt.Sprintf("__ccr_%s_%d", tableName, time.Now().Unix())
+}
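These helpers only build label strings; below is a minimal, self-contained sketch of how they are meant to compose (the job name "demo_job" and sync id 10086 are hypothetical values, not taken from this patch):

package main

import (
    "fmt"

    "github.com/selectdb/ccr_syncer/pkg/ccr"
)

func main() {
    // Hypothetical CCR job name and sync id.
    prefix := ccr.NewSnapshotLabelPrefix("demo_job", 10086) // "ccrs_demo_job_10086"
    snapshot := ccr.NewLabelWithTs(prefix)                  // prefix + "_" + current unix seconds
    restore := ccr.NewRestoreLabel(snapshot)                // snapshot + "_r_" + current unix seconds
    alias := ccr.TableAlias("sales")                        // "__ccr_sales_" + current unix seconds

    fmt.Println(prefix, snapshot, restore, alias)
}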
diff --git a/pkg/ccr/meta.go b/pkg/ccr/meta.go
index f45cf72b..2f60883e 100644
--- a/pkg/ccr/meta.go
+++ b/pkg/ccr/meta.go
@@ -9,6 +9,7 @@ import (
"github.com/selectdb/ccr_syncer/pkg/ccr/base"
"github.com/selectdb/ccr_syncer/pkg/rpc"
+ tstatus "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status"
utils "github.com/selectdb/ccr_syncer/pkg/utils"
"github.com/selectdb/ccr_syncer/pkg/xerror"
@@ -20,6 +21,9 @@ const (
degree = 128
showErrMsg = "show proc '/dbs/' failed"
+
+ TABLE_TYPE_OLAP = "OLAP"
+ TABLE_TYPE_VIEW = "VIEW"
)
// All Update* functions force to update meta from fe
@@ -41,6 +45,19 @@ type Meta struct {
BackendHostPort2IdMap map[string]int64
}
+func NewMeta(spec *base.Spec) *Meta {
+ return &Meta{
+ Spec: spec,
+ DatabaseMeta: DatabaseMeta{
+ Tables: make(map[int64]*TableMeta),
+ },
+ Backends: make(map[int64]*base.Backend),
+ DatabaseName2IdMap: make(map[string]int64),
+ TableName2IdMap: make(map[string]int64),
+ BackendHostPort2IdMap: make(map[string]int64),
+ }
+}
+
func (m *Meta) GetDbId() (int64, error) {
dbName := m.Database
@@ -53,9 +70,9 @@ func (m *Meta) GetDbId() (int64, error) {
// +-------+------------------------------------+----------+----------+-------------+--------------------------+--------------+--------------+------------------+
// | DbId | DbName | TableNum | Size | Quota | LastConsistencyCheckTime | ReplicaCount | ReplicaQuota | TransactionQuota |
// +-------+------------------------------------+----------+----------+-------------+--------------------------+--------------+--------------+------------------+
- // | 0 | default_cluster:information_schema | 24 | 0.000 | 1024.000 TB | NULL | 0 | 1073741824 | 100 |
- // | 10002 | default_cluster:__internal_schema | 4 | 0.000 | 1024.000 TB | NULL | 28 | 1073741824 | 100 |
- // | 10116 | default_cluster:ccr | 2 | 2.738 KB | 1024.000 TB | NULL | 27 | 1073741824 | 100 |
+ // | 0 | information_schema | 24 | 0.000 | 1024.000 TB | NULL | 0 | 1073741824 | 100 |
+ // | 10002 | __internal_schema | 4 | 0.000 | 1024.000 TB | NULL | 28 | 1073741824 | 100 |
+ // | 10116 | ccr | 2 | 2.738 KB | 1024.000 TB | NULL | 27 | 1073741824 | 100 |
// +-------+------------------------------------+----------+----------+-------------+--------------------------+--------------+--------------+------------------+
db, err := m.Connect()
if err != nil {
@@ -84,7 +101,9 @@ func (m *Meta) GetDbId() (int64, error) {
}
// match parsedDbname == dbname, return dbId
- if parsedDbName == dbFullName {
+ // the default_cluster prefix of db names is removed since Doris v2.1;
+ // compare both the bare db name and the full db name for compatibility.
+ if parsedDbName == dbName || parsedDbName == dbFullName {
m.DatabaseName2IdMap[dbFullName] = dbId
m.DatabaseMeta.Id = dbId
return dbId, nil
@@ -96,6 +115,7 @@ func (m *Meta) GetDbId() (int64, error) {
}
// not found
+ // ATTN: a missing db is deliberately not treated as the xerror.Meta category.
return 0, xerror.Errorf(xerror.Normal, "%s not found dbId", dbFullName)
}
@@ -105,6 +125,7 @@ func (m *Meta) GetFullTableName(tableName string) string {
return fullTableName
}
+// Update table meta, return xerror.Meta category if no such table exists.
func (m *Meta) UpdateTable(tableName string, tableId int64) (*TableMeta, error) {
log.Infof("UpdateTable tableName: %s, tableId: %d", tableName, tableId)
@@ -127,6 +148,7 @@ func (m *Meta) UpdateTable(tableName string, tableId int64) (*TableMeta, error)
}
query := fmt.Sprintf("show proc '/dbs/%d/'", dbId)
+ log.Infof("UpdateTable Sql: %s", query)
rows, err := db.Query(query)
if err != nil {
return nil, xerror.Wrap(err, xerror.Normal, query)
@@ -169,7 +191,7 @@ func (m *Meta) UpdateTable(tableName string, tableId int64) (*TableMeta, error)
}
// not found
- return nil, xerror.Errorf(xerror.Normal, "tableId %v not found table", tableId)
+ return nil, xerror.Errorf(xerror.Meta, "tableName %s tableId %v not found table", tableName, tableId)
}
func (m *Meta) GetTable(tableId int64) (*TableMeta, error) {
@@ -245,10 +267,6 @@ func (m *Meta) UpdatePartitions(tableId int64) error {
if err != nil {
return xerror.Wrapf(err, xerror.Normal, query)
}
- partitionKey, err := rowParser.GetString("PartitionKey")
- if err != nil {
- return xerror.Wrapf(err, xerror.Normal, query)
- }
partitionRange, err := rowParser.GetString("Range")
if err != nil {
return xerror.Wrapf(err, xerror.Normal, query)
@@ -258,7 +276,6 @@ func (m *Meta) UpdatePartitions(tableId int64) error {
TableMeta: table,
Id: partitionId,
Name: partitionName,
- Key: partitionKey,
Range: partitionRange,
}
partitions = append(partitions, partition)
@@ -293,7 +310,7 @@ func (m *Meta) getPartitionsWithUpdate(tableId int64, depth int64) (map[int64]*P
func (m *Meta) getPartitions(tableId int64, depth int64) (map[int64]*PartitionMeta, error) {
if depth >= 3 {
- return nil, fmt.Errorf("getPartitions depth >= 3")
+ return nil, xerror.Errorf(xerror.Normal, "getPartitions depth >= 3")
}
tableMeta, err := m.GetTable(tableId)
@@ -307,10 +324,12 @@ func (m *Meta) getPartitions(tableId int64, depth int64) (map[int64]*PartitionMe
return tableMeta.PartitionIdMap, nil
}
+// Get partition id map, return xerror.Meta category if no such table exists.
func (m *Meta) GetPartitionIdMap(tableId int64) (map[int64]*PartitionMeta, error) {
return m.getPartitions(tableId, 0)
}
+// Get partition range map, return xerror.Meta category if no such table exists.
func (m *Meta) GetPartitionRangeMap(tableId int64) (map[string]*PartitionMeta, error) {
if _, err := m.GetPartitionIdMap(tableId); err != nil {
return nil, err
@@ -346,6 +365,7 @@ func (m *Meta) GetPartitionIds(tableName string) ([]int64, error) {
return partitionIds, nil
}
+// Get partition name by id, return xerror.Meta category if no such table or partition exists.
func (m *Meta) GetPartitionName(tableId int64, partitionId int64) (string, error) {
partitions, err := m.GetPartitionIdMap(tableId)
if err != nil {
@@ -358,13 +378,14 @@ func (m *Meta) GetPartitionName(tableId int64, partitionId int64) (string, error
return "", err
}
if partition, ok = partitions[partitionId]; !ok {
- return "", xerror.Errorf(xerror.Normal, "partitionId %d not found", partitionId)
+ return "", xerror.Errorf(xerror.Meta, "partitionId %d not found", partitionId)
}
}
return partition.Name, nil
}
+// Get partition range by id, return xerror.Meta category if no such table or partition exists.
func (m *Meta) GetPartitionRange(tableId int64, partitionId int64) (string, error) {
partitions, err := m.GetPartitionIdMap(tableId)
if err != nil {
@@ -377,13 +398,14 @@ func (m *Meta) GetPartitionRange(tableId int64, partitionId int64) (string, erro
return "", err
}
if partition, ok = partitions[partitionId]; !ok {
- return "", xerror.Errorf(xerror.Normal, "partitionId %d not found", partitionId)
+ return "", xerror.Errorf(xerror.Meta, "partitionId %d not found", partitionId)
}
}
return partition.Range, nil
}
+// Get partition id by name, return xerror.Meta category if no such partition exists.
func (m *Meta) GetPartitionIdByName(tableId int64, partitionName string) (int64, error) {
// TODO: optimize performance
partitions, err := m.GetPartitionIdMap(tableId)
@@ -406,9 +428,10 @@ func (m *Meta) GetPartitionIdByName(tableId int64, partitionName string) (int64,
}
}
- return 0, xerror.Errorf(xerror.Normal, "partition name %s not found", partitionName)
+ return 0, xerror.Errorf(xerror.Meta, "partition name %s not found", partitionName)
}
+// Get partition id by range, return xerror.Meta category if no such partition exists.
func (m *Meta) GetPartitionIdByRange(tableId int64, partitionRange string) (int64, error) {
// TODO: optimize performance
partitions, err := m.GetPartitionIdMap(tableId)
@@ -431,7 +454,7 @@ func (m *Meta) GetPartitionIdByRange(tableId int64, partitionRange string) (int6
}
}
- return 0, xerror.Errorf(xerror.Normal, "partition range %s not found", partitionRange)
+ return 0, xerror.Errorf(xerror.Meta, "partition range %s not found", partitionRange)
}
func (m *Meta) UpdateBackends() error {
@@ -472,11 +495,6 @@ func (m *Meta) UpdateBackends() error {
}
var port int64
- port, err = rowParser.GetInt64("HeartbeatPort")
- if err != nil {
- return xerror.Wrapf(err, xerror.Normal, query)
- }
- backend.HeartbeatPort = uint16(port)
port, err = rowParser.GetInt64("BePort")
if err != nil {
return xerror.Wrapf(err, xerror.Normal, query)
@@ -497,6 +515,10 @@ func (m *Meta) UpdateBackends() error {
backends = append(backends, &backend)
}
+ if err := rows.Err(); err != nil {
+ return xerror.Wrap(err, xerror.Normal, query)
+ }
+
for _, backend := range backends {
m.Backends[backend.Id] = backend
@@ -507,11 +529,85 @@ func (m *Meta) UpdateBackends() error {
return nil
}
+func (m *Meta) GetFrontends() ([]*base.Frontend, error) {
+ db, err := m.Connect()
+ if err != nil {
+ return nil, err
+ }
+
+ query := "select Host, QueryPort, RpcPort, IsMaster from frontends();"
+ log.Debug(query)
+ rows, err := db.Query(query)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, query)
+ }
+
+ frontends := make([]*base.Frontend, 0)
+ defer rows.Close()
+ for rows.Next() {
+ rowParser := utils.NewRowParser()
+ if err := rowParser.Parse(rows); err != nil {
+ return nil, xerror.Wrapf(err, xerror.Normal, query)
+ }
+
+ var fe base.Frontend
+ fe.Host, err = rowParser.GetString("Host")
+ if err != nil {
+ return nil, xerror.Wrapf(err, xerror.Normal, query)
+ }
+
+ fe.Port, err = rowParser.GetString("QueryPort")
+ if err != nil {
+ return nil, xerror.Wrapf(err, xerror.Normal, query)
+ }
+
+ fe.ThriftPort, err = rowParser.GetString("RpcPort")
+ if err != nil {
+ return nil, xerror.Wrapf(err, xerror.Normal, query)
+ }
+
+ fe.IsMaster, err = rowParser.GetBool("IsMaster")
+ if err != nil {
+ return nil, xerror.Wrapf(err, xerror.Normal, query)
+ }
+
+ frontends = append(frontends, &fe)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, query)
+ }
+
+ if len(m.HostMapping) != 0 {
+ for _, frontend := range frontends {
+ if host, ok := m.HostMapping[frontend.Host]; ok {
+ frontend.Host = host
+ } else {
+ return nil, xerror.Errorf(xerror.Normal,
+ "the public ip of host %s is not found, consider adding it via HTTP API /add_host_mapping", frontend.Host)
+ }
+ }
+ }
+
+ return frontends, nil
+}
+
func (m *Meta) GetBackends() ([]*base.Backend, error) {
if len(m.Backends) > 0 {
backends := make([]*base.Backend, 0, len(m.Backends))
for _, backend := range m.Backends {
- backends = append(backends, backend)
+ backend := *backend // copy
+ backends = append(backends, &backend)
+ }
+ if len(m.HostMapping) != 0 {
+ for _, backend := range backends {
+ if host, ok := m.HostMapping[backend.Host]; ok {
+ backend.Host = host
+ } else {
+ return nil, xerror.Errorf(xerror.Normal,
+ "the public ip of host %s is not found, consider adding it via HTTP API /add_host_mapping", backend.Host)
+ }
+ }
}
return backends, nil
}
@@ -555,6 +651,7 @@ func (m *Meta) GetBackendId(host string, portStr string) (int64, error) {
return 0, xerror.Errorf(xerror.Normal, "hostPort: %s not found", hostPort)
}
+// Update indexes by table and partition, return xerror.Meta category if no such table or partition exists.
func (m *Meta) UpdateIndexes(tableId int64, partitionId int64) error {
// TODO: Optimize performance
// Step 1: get dbId
@@ -577,7 +674,7 @@ func (m *Meta) UpdateIndexes(tableId int64, partitionId int64) error {
partition, ok := partitions[partitionId]
if !ok {
- return xerror.Errorf(xerror.Normal, "partitionId: %d not found", partitionId)
+ return xerror.Errorf(xerror.Meta, "partitionId: %d not found", partitionId)
}
// mysql> show proc '/dbs/10116/10118/partitions/10117';
@@ -617,10 +714,12 @@ func (m *Meta) UpdateIndexes(tableId int64, partitionId int64) error {
}
log.Debugf("indexId: %d, indexName: %s", indexId, indexName)
+ isBaseIndex := table.Name == indexName // this may be stale after a table rename
index := &IndexMeta{
PartitionMeta: partition,
Id: indexId,
Name: indexName,
+ IsBaseIndex: isBaseIndex,
}
indexes = append(indexes, index)
}
@@ -639,6 +738,7 @@ func (m *Meta) UpdateIndexes(tableId int64, partitionId int64) error {
return nil
}
+// Get indexes by table and partition, return xerror.Meta if no such table or partition exists.
func (m *Meta) getIndexes(tableId int64, partitionId int64, hasUpdate bool) (map[int64]*IndexMeta, error) {
partitions, err := m.GetPartitionIdMap(tableId)
if err != nil {
@@ -648,7 +748,7 @@ func (m *Meta) getIndexes(tableId int64, partitionId int64, hasUpdate bool) (map
partition, ok := partitions[partitionId]
if !ok || len(partition.IndexIdMap) == 0 {
if hasUpdate {
- return nil, xerror.Errorf(xerror.Normal, "partitionId: %d not found", partitionId)
+ return nil, xerror.Errorf(xerror.Meta, "partitionId: %d not found", partitionId)
}
err = m.UpdateIndexes(tableId, partitionId)
@@ -661,22 +761,27 @@ func (m *Meta) getIndexes(tableId int64, partitionId int64, hasUpdate bool) (map
return partition.IndexIdMap, nil
}
+// Get indexes id map by table and partition, return xerror.Meta if no such table or partition exists.
func (m *Meta) GetIndexIdMap(tableId int64, partitionId int64) (map[int64]*IndexMeta, error) {
return m.getIndexes(tableId, partitionId, false)
}
-func (m *Meta) GetIndexNameMap(tableId int64, partitionId int64) (map[string]*IndexMeta, error) {
+// Get indexes name map by table and partition, return xerror.Meta if no such table or partition exists.
+func (m *Meta) GetIndexNameMap(tableId int64, partitionId int64) (map[string]*IndexMeta, *IndexMeta, error) {
if _, err := m.getIndexes(tableId, partitionId, false); err != nil {
- return nil, err
+ return nil, nil, err
}
partitions, err := m.GetPartitionIdMap(tableId)
if err != nil {
- return nil, err
+ return nil, nil, err
}
- partition := partitions[partitionId]
- return partition.IndexNameMap, nil
+ if partition, ok := partitions[partitionId]; !ok {
+ return nil, nil, xerror.Errorf(xerror.Meta, "partition %d is not found", partitionId)
+ } else {
+ return partition.IndexNameMap, nil, nil
+ }
}
func (m *Meta) updateReplica(index *IndexMeta) error {
@@ -756,6 +861,7 @@ func (m *Meta) updateReplica(index *IndexMeta) error {
return nil
}
+// Update replicas by table and partition, return xerror.Meta category if no such table or partition exists.
func (m *Meta) UpdateReplicas(tableId int64, partitionId int64) error {
indexes, err := m.GetIndexIdMap(tableId, partitionId)
if err != nil {
@@ -763,7 +869,7 @@ func (m *Meta) UpdateReplicas(tableId int64, partitionId int64) error {
}
if len(indexes) == 0 {
- return xerror.Errorf(xerror.Normal, "indexes is empty")
+ return xerror.Errorf(xerror.Meta, "indexes is empty")
}
// TODO: Update index as much as possible, record error
@@ -776,6 +882,7 @@ func (m *Meta) UpdateReplicas(tableId int64, partitionId int64) error {
return nil
}
+// Get replicas by table and partition, return xerror.Meta category if no such table or partition exists.
func (m *Meta) GetReplicas(tableId int64, partitionId int64) (*btree.Map[int64, *ReplicaMeta], error) {
indexes, err := m.GetIndexIdMap(tableId, partitionId)
if err != nil {
@@ -783,7 +890,7 @@ func (m *Meta) GetReplicas(tableId int64, partitionId int64) (*btree.Map[int64,
}
if len(indexes) == 0 {
- return nil, xerror.Errorf(xerror.Normal, "indexes is empty")
+ return nil, xerror.Errorf(xerror.Meta, "indexes is empty")
}
// fast path, no rollup
@@ -820,6 +927,7 @@ func (m *Meta) GetReplicas(tableId int64, partitionId int64) (*btree.Map[int64,
return replicas, nil
}
+// Get tablets by table, partition and index, return xerror.Meta category if no such table, partition or index exists.
func (m *Meta) GetTablets(tableId, partitionId, indexId int64) (*btree.Map[int64, *TabletMeta], error) {
_, err := m.GetReplicas(tableId, partitionId)
if err != nil {
@@ -834,7 +942,7 @@ func (m *Meta) GetTablets(tableId, partitionId, indexId int64) (*btree.Map[int64
if tablets, ok := indexes[indexId]; ok {
return tablets.TabletMetas, nil
} else {
- return nil, xerror.Errorf(xerror.Normal, "index %d not found", indexId)
+ return nil, xerror.Errorf(xerror.Meta, "index %d not found", indexId)
}
}
@@ -846,10 +954,12 @@ func (m *Meta) UpdateToken(rpcFactory rpc.IRpcFactory) error {
return err
}
- if token, err := rpc.GetMasterToken(spec); err != nil {
+ if resp, err := rpc.GetMasterToken(spec); err != nil {
return err
+ } else if resp.GetStatus().GetStatusCode() != tstatus.TStatusCode_OK {
+ return xerror.Errorf(xerror.Meta, "get master token failed, status: %s", resp.GetStatus().String())
} else {
- m.token = token
+ m.token = resp.GetToken()
return nil
}
}
@@ -879,7 +989,6 @@ func (m *Meta) GetTableNameById(tableId int64) (string, error) {
return "", err
}
- var tableName string
sql := fmt.Sprintf("show table %d", tableId)
rows, err := db.Query(sql)
if err != nil {
@@ -887,15 +996,18 @@ func (m *Meta) GetTableNameById(tableId int64) (string, error) {
}
defer rows.Close()
+ var tableName string
for rows.Next() {
rowParser := utils.NewRowParser()
if err := rowParser.Parse(rows); err != nil {
return "", xerror.Wrapf(err, xerror.Normal, sql)
}
+
tableName, err = rowParser.GetString("TableName")
if err != nil {
return "", xerror.Wrap(err, xerror.Normal, sql)
}
+ log.Debugf("found table %d name %s", tableId, tableName)
}
if err := rows.Err(); err != nil {
@@ -948,10 +1060,20 @@ func (m *Meta) GetTables() (map[int64]*TableMeta, error) {
if err != nil {
return nil, xerror.Wrapf(err, xerror.Normal, query)
}
+ tableType, err := rowParser.GetString("Type")
+ if err != nil {
+ return nil, xerror.Wrapf(err, xerror.Normal, "get tables Type failed, query: %s", query)
+ }
- // match parsedDbname == dbname, return dbId
fullTableName := m.GetFullTableName(tableName)
- log.Debugf("found table:%s, tableId:%d", fullTableName, tableId)
+ log.Debugf("found table: %s, id: %d, type: %s", fullTableName, tableId, tableType)
+
+ if tableType != TABLE_TYPE_OLAP && tableType != TABLE_TYPE_VIEW {
+ // See fe/fe-core/src/main/java/org/apache/doris/backup/BackupHandler.java:backup() for details
+ continue
+ }
+
+ // record the full table name -> id mapping
tableName2IdMap[fullTableName] = tableId
tables[tableId] = &TableMeta{
DatabaseMeta: &m.DatabaseMeta,
@@ -975,7 +1097,7 @@ func (m *Meta) CheckBinlogFeature() error {
if binlogIsEnabled, err := m.isFEBinlogFeature(); err != nil {
return err
} else if !binlogIsEnabled {
- return xerror.Errorf(xerror.Normal, "Fe %v:%v enable_binlog_feature=false, please set it true in fe.conf",
+ return xerror.Errorf(xerror.Normal, "Fe %v:%v enable_feature_binlog=false, please set it true in fe.conf",
m.Spec.Host, m.Spec.Port)
}
@@ -1058,6 +1180,11 @@ func (m *Meta) DirtyGetTables() map[int64]*TableMeta {
return m.Tables
}
+func (m *Meta) ClearTablesCache() {
+ m.Tables = make(map[int64]*TableMeta)
+ m.TableName2IdMap = make(map[string]int64)
+}
+
func (m *Meta) ClearDB(dbName string) {
if m.Database != dbName {
log.Info("dbName not match, skip clear")
@@ -1089,3 +1216,15 @@ func (m *Meta) ClearTable(dbName string, tableName string) {
delete(m.TableName2IdMap, tableName)
}
+
+func (m *Meta) IsPartitionDropped(partitionId int64) bool {
+ panic("IsPartitionDropped is not supported, please use ThriftMeta instead")
+}
+
+func (m *Meta) IsTableDropped(tableId int64) bool {
+ panic("IsTableDropped is not supported, please use ThriftMeta instead")
+}
+
+func (m *Meta) IsIndexDropped(indexId int64) bool {
+ panic("IsIndexDropped is not supported, please use ThriftMeta instead")
+}
diff --git a/pkg/ccr/metaer.go b/pkg/ccr/metaer.go
index 1048ce24..f7286f12 100644
--- a/pkg/ccr/metaer.go
+++ b/pkg/ccr/metaer.go
@@ -16,6 +16,7 @@ type DatabaseMeta struct {
type TableMeta struct {
DatabaseMeta *DatabaseMeta
Id int64
+ BaseIndexId int64
Name string // maybe dirty, such after rename
PartitionIdMap map[int64]*PartitionMeta // partitionId -> partitionMeta
PartitionRangeMap map[string]*PartitionMeta // partitionRange -> partitionMeta
@@ -27,24 +28,25 @@ func (t *TableMeta) String() string {
}
type PartitionMeta struct {
- TableMeta *TableMeta
- Id int64
- Name string
- Key string
- Range string
- IndexIdMap map[int64]*IndexMeta // indexId -> indexMeta
- IndexNameMap map[string]*IndexMeta // indexName -> indexMeta
+ TableMeta *TableMeta
+ Id int64
+ Name string
+ Range string
+ VisibleVersion int64
+ IndexIdMap map[int64]*IndexMeta // indexId -> indexMeta
+ IndexNameMap map[string]*IndexMeta // indexName -> indexMeta
}
// Stringer
func (p *PartitionMeta) String() string {
- return fmt.Sprintf("PartitionMeta{(id:%d), (name:%s), (key:%s), (range:%s)}", p.Id, p.Name, p.Key, p.Range)
+ return fmt.Sprintf("PartitionMeta{(id:%d), (name:%s), (range:%s)}", p.Id, p.Name, p.Range)
}
type IndexMeta struct {
PartitionMeta *PartitionMeta
Id int64
Name string
+ IsBaseIndex bool
TabletMetas *btree.Map[int64, *TabletMeta] // tabletId -> tablet
ReplicaMetas *btree.Map[int64, *ReplicaMeta] // replicaId -> replica
}
@@ -60,6 +62,7 @@ type ReplicaMeta struct {
Id int64
TabletId int64
BackendId int64
+ Version int64
}
type MetaCleaner interface {
@@ -67,6 +70,18 @@ type MetaCleaner interface {
ClearTable(dbName string, tableName string)
}
+type IngestBinlogMetaer interface {
+ GetTablets(tableId, partitionId, indexId int64) (*btree.Map[int64, *TabletMeta], error)
+ GetPartitionIdByRange(tableId int64, partitionRange string) (int64, error)
+ GetPartitionRangeMap(tableId int64) (map[string]*PartitionMeta, error)
+ GetIndexIdMap(tableId, partitionId int64) (map[int64]*IndexMeta, error)
+ GetIndexNameMap(tableId, partitionId int64) (map[string]*IndexMeta, *IndexMeta, error)
+ GetBackendMap() (map[int64]*base.Backend, error)
+ IsPartitionDropped(partitionId int64) bool
+ IsTableDropped(tableId int64) bool
+ IsIndexDropped(indexId int64) bool
+}
+
type Metaer interface {
GetDbId() (int64, error)
GetFullTableName(tableName string) string
@@ -79,35 +94,29 @@ type Metaer interface {
UpdatePartitions(tableId int64) error
GetPartitionIdMap(tableId int64) (map[int64]*PartitionMeta, error)
- GetPartitionRangeMap(tableId int64) (map[string]*PartitionMeta, error)
GetPartitionIds(tableName string) ([]int64, error)
GetPartitionName(tableId int64, partitionId int64) (string, error)
GetPartitionRange(tableId int64, partitionId int64) (string, error)
GetPartitionIdByName(tableId int64, partitionName string) (int64, error)
- GetPartitionIdByRange(tableId int64, partitionRange string) (int64, error)
+ GetFrontends() ([]*base.Frontend, error)
UpdateBackends() error
GetBackends() ([]*base.Backend, error)
- GetBackendMap() (map[int64]*base.Backend, error)
GetBackendId(host, portStr string) (int64, error)
UpdateIndexes(tableId, partitionId int64) error
- GetIndexIdMap(tableId, partitionId int64) (map[int64]*IndexMeta, error)
- GetIndexNameMap(tableId, partitionId int64) (map[string]*IndexMeta, error)
UpdateReplicas(tableId, partitionId int64) error
GetReplicas(tableId, partitionId int64) (*btree.Map[int64, *ReplicaMeta], error)
- GetTablets(tableId, partitionId, indexId int64) (*btree.Map[int64, *TabletMeta], error)
-
UpdateToken(rpcFactory rpc.IRpcFactory) error
GetMasterToken(rpcFactory rpc.IRpcFactory) (string, error)
CheckBinlogFeature() error
DirtyGetTables() map[int64]*TableMeta
+ ClearTablesCache()
- // from Spec
- DbExec(sql string) error
+ IngestBinlogMetaer
MetaCleaner
}
diff --git a/pkg/ccr/metaer_factory.go b/pkg/ccr/metaer_factory.go
index 64801492..ea498659 100644
--- a/pkg/ccr/metaer_factory.go
+++ b/pkg/ccr/metaer_factory.go
@@ -8,22 +8,12 @@ type MetaerFactory interface {
NewMeta(tableSpec *base.Spec) Metaer
}
-type MetaFactory struct {
-}
+type MetaFactory struct{}
func NewMetaFactory() MetaerFactory {
return &MetaFactory{}
}
-func (mf *MetaFactory) NewMeta(tableSpec *base.Spec) Metaer {
- return &Meta{
- Spec: tableSpec,
- DatabaseMeta: DatabaseMeta{
- Tables: make(map[int64]*TableMeta),
- },
- Backends: make(map[int64]*base.Backend),
- DatabaseName2IdMap: make(map[string]int64),
- TableName2IdMap: make(map[string]int64),
- BackendHostPort2IdMap: make(map[string]int64),
- }
+func (mf *MetaFactory) NewMeta(spec *base.Spec) Metaer {
+ return NewMeta(spec)
}
diff --git a/pkg/ccr/metaer_factory_mock.go b/pkg/ccr/metaer_factory_mock.go
index e1600f05..6b640756 100644
--- a/pkg/ccr/metaer_factory_mock.go
+++ b/pkg/ccr/metaer_factory_mock.go
@@ -1,9 +1,9 @@
// Code generated by MockGen. DO NOT EDIT.
-// Source: ccr/metaer_factory.go
+// Source: pkg/ccr/metaer_factory.go
//
// Generated by this command:
//
-// mockgen -source=ccr/metaer_factory.go -destination=ccr/metaer_factory_mock.go -package=ccr
+// mockgen -source=pkg/ccr/metaer_factory.go -destination=pkg/ccr/metaer_factory_mock.go -package=ccr
//
// Package ccr is a generated GoMock package.
package ccr
diff --git a/pkg/ccr/metaer_mock.go b/pkg/ccr/metaer_mock.go
index a0f0cb9a..bfba5308 100644
--- a/pkg/ccr/metaer_mock.go
+++ b/pkg/ccr/metaer_mock.go
@@ -1,9 +1,9 @@
// Code generated by MockGen. DO NOT EDIT.
-// Source: ccr/metaer.go
+// Source: pkg/ccr/metaer.go
//
// Generated by this command:
//
-// mockgen -source=ccr/metaer.go -destination=ccr/metaer_mock.go -package=ccr
+// mockgen -source=pkg/ccr/metaer.go -destination=pkg/ccr/metaer_mock.go -package=ccr
//
// Package ccr is a generated GoMock package.
package ccr
@@ -64,6 +64,119 @@ func (mr *MockMetaCleanerMockRecorder) ClearTable(dbName, tableName any) *gomock
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClearTable", reflect.TypeOf((*MockMetaCleaner)(nil).ClearTable), dbName, tableName)
}
+// MockIngestBinlogMetaer is a mock of IngestBinlogMetaer interface.
+type MockIngestBinlogMetaer struct {
+ ctrl *gomock.Controller
+ recorder *MockIngestBinlogMetaerMockRecorder
+}
+
+// MockIngestBinlogMetaerMockRecorder is the mock recorder for MockIngestBinlogMetaer.
+type MockIngestBinlogMetaerMockRecorder struct {
+ mock *MockIngestBinlogMetaer
+}
+
+// NewMockIngestBinlogMetaer creates a new mock instance.
+func NewMockIngestBinlogMetaer(ctrl *gomock.Controller) *MockIngestBinlogMetaer {
+ mock := &MockIngestBinlogMetaer{ctrl: ctrl}
+ mock.recorder = &MockIngestBinlogMetaerMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockIngestBinlogMetaer) EXPECT() *MockIngestBinlogMetaerMockRecorder {
+ return m.recorder
+}
+
+// GetBackendMap mocks base method.
+func (m *MockIngestBinlogMetaer) GetBackendMap() (map[int64]*base.Backend, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetBackendMap")
+ ret0, _ := ret[0].(map[int64]*base.Backend)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetBackendMap indicates an expected call of GetBackendMap.
+func (mr *MockIngestBinlogMetaerMockRecorder) GetBackendMap() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBackendMap", reflect.TypeOf((*MockIngestBinlogMetaer)(nil).GetBackendMap))
+}
+
+// GetIndexIdMap mocks base method.
+func (m *MockIngestBinlogMetaer) GetIndexIdMap(tableId, partitionId int64) (map[int64]*IndexMeta, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetIndexIdMap", tableId, partitionId)
+ ret0, _ := ret[0].(map[int64]*IndexMeta)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetIndexIdMap indicates an expected call of GetIndexIdMap.
+func (mr *MockIngestBinlogMetaerMockRecorder) GetIndexIdMap(tableId, partitionId any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIndexIdMap", reflect.TypeOf((*MockIngestBinlogMetaer)(nil).GetIndexIdMap), tableId, partitionId)
+}
+
+// GetIndexNameMap mocks base method.
+func (m *MockIngestBinlogMetaer) GetIndexNameMap(tableId, partitionId int64) (map[string]*IndexMeta, *IndexMeta, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetIndexNameMap", tableId, partitionId)
+ ret0, _ := ret[0].(map[string]*IndexMeta)
+ ret1, _ := ret[1].(*IndexMeta)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
+}
+
+// GetIndexNameMap indicates an expected call of GetIndexNameMap.
+func (mr *MockIngestBinlogMetaerMockRecorder) GetIndexNameMap(tableId, partitionId any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIndexNameMap", reflect.TypeOf((*MockIngestBinlogMetaer)(nil).GetIndexNameMap), tableId, partitionId)
+}
+
+// GetPartitionIdByRange mocks base method.
+func (m *MockIngestBinlogMetaer) GetPartitionIdByRange(tableId int64, partitionRange string) (int64, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetPartitionIdByRange", tableId, partitionRange)
+ ret0, _ := ret[0].(int64)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetPartitionIdByRange indicates an expected call of GetPartitionIdByRange.
+func (mr *MockIngestBinlogMetaerMockRecorder) GetPartitionIdByRange(tableId, partitionRange any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPartitionIdByRange", reflect.TypeOf((*MockIngestBinlogMetaer)(nil).GetPartitionIdByRange), tableId, partitionRange)
+}
+
+// GetPartitionRangeMap mocks base method.
+func (m *MockIngestBinlogMetaer) GetPartitionRangeMap(tableId int64) (map[string]*PartitionMeta, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetPartitionRangeMap", tableId)
+ ret0, _ := ret[0].(map[string]*PartitionMeta)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetPartitionRangeMap indicates an expected call of GetPartitionRangeMap.
+func (mr *MockIngestBinlogMetaerMockRecorder) GetPartitionRangeMap(tableId any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPartitionRangeMap", reflect.TypeOf((*MockIngestBinlogMetaer)(nil).GetPartitionRangeMap), tableId)
+}
+
+// GetTablets mocks base method.
+func (m *MockIngestBinlogMetaer) GetTablets(tableId, partitionId, indexId int64) (*btree.Map[int64, *TabletMeta], error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetTablets", tableId, partitionId, indexId)
+ ret0, _ := ret[0].(*btree.Map[int64, *TabletMeta])
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetTablets indicates an expected call of GetTablets.
+func (mr *MockIngestBinlogMetaerMockRecorder) GetTablets(tableId, partitionId, indexId any) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTablets", reflect.TypeOf((*MockIngestBinlogMetaer)(nil).GetTablets), tableId, partitionId, indexId)
+}
+
// MockMetaer is a mock of Metaer interface.
type MockMetaer struct {
ctrl *gomock.Controller
@@ -213,6 +326,21 @@ func (mr *MockMetaerMockRecorder) GetDbId() *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDbId", reflect.TypeOf((*MockMetaer)(nil).GetDbId))
}
+// GetFrontends mocks base method.
+func (m *MockMetaer) GetFrontends() ([]*base.Frontend, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetFrontends")
+ ret0, _ := ret[0].([]*base.Frontend)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetFrontends indicates an expected call of GetFrontends.
+func (mr *MockMetaerMockRecorder) GetFrontends() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFrontends", reflect.TypeOf((*MockMetaer)(nil).GetFrontends))
+}
+
// GetFullTableName mocks base method.
func (m *MockMetaer) GetFullTableName(tableName string) string {
m.ctrl.T.Helper()
diff --git a/pkg/ccr/record/add_partition.go b/pkg/ccr/record/add_partition.go
index 04f78f22..567913e4 100644
--- a/pkg/ccr/record/add_partition.go
+++ b/pkg/ccr/record/add_partition.go
@@ -2,13 +2,32 @@ package record
import (
"encoding/json"
+ "fmt"
+ "strings"
+ "github.com/selectdb/ccr_syncer/pkg/utils"
"github.com/selectdb/ccr_syncer/pkg/xerror"
+
+ log "github.com/sirupsen/logrus"
)
+type DistributionInfo struct {
+ BucketNum int `json:"bucketNum"`
+ Type string `json:"type"`
+ DistributionColumns []struct {
+ Name string `json:"name"`
+ } `json:"distributionColumns"`
+}
+
type AddPartition struct {
- TableId int64 `json:"tableId"`
- Sql string `json:"sql"`
+ DbId int64 `json:"dbId"`
+ TableId int64 `json:"tableId"`
+ Sql string `json:"sql"`
+ IsTemp bool `json:"isTempPartition"`
+ Partition struct {
+ DistributionInfoOld *DistributionInfo `json:"distributionInfo"`
+ DistributionInfoNew *DistributionInfo `json:"di"`
+ } `json:"partition"`
}
func NewAddPartitionFromJson(data string) (*AddPartition, error) {
@@ -19,7 +38,6 @@ func NewAddPartitionFromJson(data string) (*AddPartition, error) {
}
if addPartition.Sql == "" {
- // TODO: fallback to create sql from other fields
return nil, xerror.Errorf(xerror.Normal, "add partition sql is empty")
}
@@ -29,3 +47,61 @@ func NewAddPartitionFromJson(data string) (*AddPartition, error) {
return &addPartition, nil
}
+
+func (addPartition *AddPartition) getDistributionInfo() *DistributionInfo {
+ if addPartition.Partition.DistributionInfoOld != nil {
+ return addPartition.Partition.DistributionInfoOld
+ }
+ return addPartition.Partition.DistributionInfoNew
+}
+
+func (addPartition *AddPartition) getDistributionColumns() []string {
+ var distributionColumns []string
+ for _, column := range addPartition.getDistributionInfo().DistributionColumns {
+ distributionColumns = append(distributionColumns, column.Name)
+ }
+ return distributionColumns
+}
+
+func (addPartition *AddPartition) GetSql(destTableName string) string {
+ // addPartitionSql = "ALTER TABLE " + sql
+ addPartitionSql := fmt.Sprintf("ALTER TABLE %s %s", utils.FormatKeywordName(destTableName), addPartition.Sql)
+ // strip the trailing ';' so BUCKETS/DISTRIBUTED BY clauses can be appended
+ addPartitionSql = strings.TrimRight(addPartitionSql, ";")
+ // check whether the sql already contains a BUCKETS clause (case-insensitive)
+ if strings.Contains(strings.ToUpper(addPartitionSql), "BUCKETS") {
+ // if the bucket number is explicit (no BUCKETS AUTO), return the sql as-is
+ if !strings.Contains(strings.ToUpper(addPartitionSql), "BUCKETS AUTO") {
+ log.Infof("addPartitionSql contains BUCKETS declaration, sql: %s", addPartitionSql)
+ return addPartitionSql
+ }
+
+ log.Info("addPartitionSql contains BUCKETS AUTO, remove it")
+ // BUCKETS AUTO sits at the end of the sql, so nothing meaningful follows it.
+ // Locate it case-insensitively and cut it off, leaving the case of the rest
+ // of the sql untouched.
+ bucketsAutoIndex := strings.LastIndex(strings.ToUpper(addPartitionSql), "BUCKETS AUTO")
+ addPartitionSql = addPartitionSql[:bucketsAutoIndex]
+ }
+
+ // if the sql does not already contain a DISTRIBUTED BY clause, append one, e.g.
+ // ALTER TABLE my_table
+ // ADD PARTITION p1 VALUES LESS THAN ("2015-01-01")
+ // DISTRIBUTED BY HASH(k1) BUCKETS 20;
+ // or DISTRIBUTED BY RANDOM BUCKETS 20;
+ distributionInfo := addPartition.getDistributionInfo()
+ if !strings.Contains(strings.ToUpper(addPartitionSql), "DISTRIBUTED BY") {
+ if distributionInfo.Type == "HASH" {
+ addPartitionSql = fmt.Sprintf("%s DISTRIBUTED BY HASH(%s)", addPartitionSql,
+ "`"+strings.Join(addPartition.getDistributionColumns(), "`,`")+"`")
+ } else {
+ addPartitionSql = fmt.Sprintf("%s DISTRIBUTED BY RANDOM", addPartitionSql)
+ }
+ }
+ bucketNum := distributionInfo.BucketNum
+ addPartitionSql = fmt.Sprintf("%s BUCKETS %d", addPartitionSql, bucketNum)
+
+ return addPartitionSql
+}
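A hedged sketch of the rewrite GetSql performs: the statement is re-targeted at the destination table, a trailing ';' and any BUCKETS AUTO clause are stripped, and an explicit DISTRIBUTED BY/BUCKETS clause is appended from the recorded distribution info. The JSON payload and table name below are illustrative, not a real binlog:

package main

import (
    "fmt"

    "github.com/selectdb/ccr_syncer/pkg/ccr/record"
)

func main() {
    // Illustrative ADD_PARTITION binlog payload (field values are made up).
    data := `{
        "dbId": 10116,
        "tableId": 10117,
        "sql": "ADD PARTITION ` + "`p20240101`" + ` VALUES LESS THAN (\"2024-01-01\") BUCKETS AUTO;",
        "partition": {
            "distributionInfo": {
                "bucketNum": 16,
                "type": "HASH",
                "distributionColumns": [{"name": "k1"}]
            }
        }
    }`

    addPartition, err := record.NewAddPartitionFromJson(data)
    if err != nil {
        panic(err)
    }

    // Roughly: ALTER TABLE `my_table` ADD PARTITION `p20240101` VALUES LESS THAN ("2024-01-01")
    //          DISTRIBUTED BY HASH(`k1`) BUCKETS 16
    fmt.Println(addPartition.GetSql("my_table"))
}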
diff --git a/pkg/ccr/record/alter_job_v2.go b/pkg/ccr/record/alter_job_v2.go
index e52c337c..c74a7a6c 100644
--- a/pkg/ccr/record/alter_job_v2.go
+++ b/pkg/ccr/record/alter_job_v2.go
@@ -7,14 +7,32 @@ import (
"github.com/selectdb/ccr_syncer/pkg/xerror"
)
+const (
+ ALTER_JOB_SCHEMA_CHANGE = "SCHEMA_CHANGE"
+ ALTER_JOB_ROLLUP = "ROLLUP"
+
+ ALTER_JOB_STATE_PENDING = "PENDING"
+ ALTER_JOB_STATE_WAITING_TXN = "WAITING_TXN"
+ ALTER_JOB_STATE_RUNNING = "RUNNING"
+ ALTER_JOB_STATE_FINISHED = "FINISHED"
+ ALTER_JOB_STATE_CANCELLED = "CANCELLED"
+)
+
type AlterJobV2 struct {
- Type string `json:"type"`
- DbId int64 `json:"dbId"`
- TableId int64 `json:"tableId"`
- TableName string `json:"tableName"`
- JobId int64 `json:"jobId"`
- JobState string `json:"jobState"`
- RawSql string `json:"rawSql"`
+ Type string `json:"type"`
+ DbId int64 `json:"dbId"`
+ TableId int64 `json:"tableId"`
+ TableName string `json:"tableName"`
+ JobId int64 `json:"jobId"`
+ JobState string `json:"jobState"`
+ RawSql string `json:"rawSql"`
+ ShadowIndexes map[int64]int64 `json:"iim"`
+
+ // for rollup
+ RollupIndexId int64 `json:"rollupIndexId"`
+ RollupIndexName string `json:"rollupIndexName"`
+ BaseIndexId int64 `json:"baseIndexId"`
+ BaseIndexName string `json:"baseIndexName"`
}
func NewAlterJobV2FromJson(data string) (*AlterJobV2, error) {
@@ -31,14 +49,18 @@ func NewAlterJobV2FromJson(data string) (*AlterJobV2, error) {
// }
if alterJob.TableId == 0 {
- return nil, xerror.Errorf(xerror.Normal, "table id not found")
+ return nil, xerror.Errorf(xerror.Normal, "invalid alter job, table id not found")
+ }
+
+ if alterJob.TableName == "" {
+ return nil, xerror.Errorf(xerror.Normal, "invalid alter job, tableName is empty")
}
return &alterJob, nil
}
func (a *AlterJobV2) IsFinished() bool {
- return a.JobState == "FINISHED"
+ return a.JobState == ALTER_JOB_STATE_FINISHED
}
// Stringer
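For reference, a minimal sketch of decoding an alter-job record and gating on its terminal state with the constants above; the payload is hypothetical and trimmed to the fields the constructor validates:

package main

import (
    "fmt"

    "github.com/selectdb/ccr_syncer/pkg/ccr/record"
)

func main() {
    // Hypothetical schema-change job that has already finished upstream.
    data := `{
        "type": "SCHEMA_CHANGE",
        "dbId": 10116,
        "tableId": 10117,
        "tableName": "sales",
        "jobId": 42,
        "jobState": "FINISHED",
        "rawSql": "ALTER TABLE sales ADD COLUMN c1 INT"
    }`

    alterJob, err := record.NewAlterJobV2FromJson(data)
    if err != nil {
        panic(err)
    }

    // How a caller might gate on the job type and terminal state (an assumption
    // about usage, not code from this patch).
    if alterJob.Type == record.ALTER_JOB_SCHEMA_CHANGE && alterJob.IsFinished() {
        fmt.Println("finished schema change:", alterJob.RawSql)
    }
}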
diff --git a/pkg/ccr/record/alter_view.go b/pkg/ccr/record/alter_view.go
new file mode 100644
index 00000000..eb5687f9
--- /dev/null
+++ b/pkg/ccr/record/alter_view.go
@@ -0,0 +1,31 @@
+package record
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type AlterView struct {
+ DbId int64 `json:"dbId"`
+ TableId int64 `json:"tableId"`
+ InlineViewDef string `json:"inlineViewDef"`
+ SqlMode int64 `json:"sqlMode"`
+}
+
+func NewAlterViewFromJson(data string) (*AlterView, error) {
+ var alterView AlterView
+ err := json.Unmarshal([]byte(data), &alterView)
+ if err != nil {
+ return nil, fmt.Errorf("unmarshal alter view error: %v", err)
+ }
+
+ if alterView.TableId == 0 {
+ return nil, fmt.Errorf("table id not found")
+ }
+
+ return &alterView, nil
+}
+
+func (a *AlterView) String() string {
+ return fmt.Sprintf("AlterView: DbId: %d, TableId: %d, InlineViewDef: %s, SqlMode: %d", a.DbId, a.TableId, a.InlineViewDef, a.SqlMode)
+}
diff --git a/pkg/ccr/record/barrier_log.go b/pkg/ccr/record/barrier_log.go
new file mode 100644
index 00000000..d1b93ffe
--- /dev/null
+++ b/pkg/ccr/record/barrier_log.go
@@ -0,0 +1,23 @@
+package record
+
+import (
+ "encoding/json"
+
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+)
+
+type BarrierLog struct {
+ DbId int64 `json:"dbId"`
+ TableId int64 `json:"tableId"`
+ BinlogType int64 `json:"binlogType"`
+ Binlog string `json:"binlog"`
+}
+
+func NewBarrierLogFromJson(data string) (*BarrierLog, error) {
+ var log BarrierLog
+ err := json.Unmarshal([]byte(data), &log)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "unmarshal barrier log error")
+ }
+ return &log, nil
+}
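A small sketch of decoding a barrier log. Whether Binlog really carries another serialized binlog is an assumption based on the field names; nothing in this file enforces it:

package main

import (
    "fmt"

    "github.com/selectdb/ccr_syncer/pkg/ccr/record"
)

func main() {
    // Hypothetical barrier payload with an empty wrapped binlog.
    data := `{"dbId": 10116, "tableId": 10117, "binlogType": 0, "binlog": ""}`

    barrier, err := record.NewBarrierLogFromJson(data)
    if err != nil {
        panic(err)
    }

    if barrier.Binlog == "" {
        fmt.Println("plain barrier, nothing wrapped")
    } else {
        fmt.Printf("wrapped binlog (type %d): %s\n", barrier.BinlogType, barrier.Binlog)
    }
}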
diff --git a/pkg/ccr/record/create_table.go b/pkg/ccr/record/create_table.go
index 634fc218..182107dd 100644
--- a/pkg/ccr/record/create_table.go
+++ b/pkg/ccr/record/create_table.go
@@ -3,6 +3,7 @@ package record
import (
"encoding/json"
"fmt"
+ "regexp"
"github.com/selectdb/ccr_syncer/pkg/xerror"
)
@@ -11,6 +12,10 @@ type CreateTable struct {
DbId int64 `json:"dbId"`
TableId int64 `json:"tableId"`
Sql string `json:"sql"`
+
+ // The fields below were added in Doris 2.0.3: https://github.com/apache/doris/pull/26901
+ DbName string `json:"dbName"`
+ TableName string `json:"tableName"`
}
func NewCreateTableFromJson(data string) (*CreateTable, error) {
@@ -32,7 +37,13 @@ func NewCreateTableFromJson(data string) (*CreateTable, error) {
return &createTable, nil
}
+func (c *CreateTable) IsCreateView() bool {
+ viewRegex := regexp.MustCompile(`(?i)^CREATE(\s+)VIEW`)
+ return viewRegex.MatchString(c.Sql)
+}
+
// String
func (c *CreateTable) String() string {
- return fmt.Sprintf("CreateTable: DbId: %d, TableId: %d, Sql: %s", c.DbId, c.TableId, c.Sql)
+ return fmt.Sprintf("CreateTable: DbId: %d, DbName: %s, TableId: %d, TableName: %s, Sql: %s",
+ c.DbId, c.DbName, c.TableId, c.TableName, c.Sql)
}
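The view detection above is purely syntactic; a quick sketch of what IsCreateView does and does not match (the statements are illustrative):

package main

import (
    "fmt"

    "github.com/selectdb/ccr_syncer/pkg/ccr/record"
)

func main() {
    for _, sql := range []string{
        "CREATE VIEW v1 AS SELECT 1",           // matches: starts with CREATE VIEW
        "create   view v2 AS SELECT k1 FROM t", // matches: case-insensitive, any spacing
        "CREATE TABLE t1 (k1 INT)",             // does not match
    } {
        ct := record.CreateTable{Sql: sql}
        fmt.Printf("%-40s -> IsCreateView=%v\n", sql, ct.IsCreateView())
    }
}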
diff --git a/pkg/ccr/record/drop_partition.go b/pkg/ccr/record/drop_partition.go
index c1cdf02d..37b01245 100644
--- a/pkg/ccr/record/drop_partition.go
+++ b/pkg/ccr/record/drop_partition.go
@@ -9,6 +9,7 @@ import (
type DropPartition struct {
TableId int64 `json:"tableId"`
Sql string `json:"sql"`
+ IsTemp bool `json:"isTempPartition"`
}
func NewDropPartitionFromJson(data string) (*DropPartition, error) {
diff --git a/pkg/ccr/record/drop_rollup.go b/pkg/ccr/record/drop_rollup.go
new file mode 100644
index 00000000..d7e546e2
--- /dev/null
+++ b/pkg/ccr/record/drop_rollup.go
@@ -0,0 +1,43 @@
+package record
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+)
+
+type DropRollup struct {
+ DbId int64 `json:"dbId"`
+ TableId int64 `json:"tableId"`
+ TableName string `json:"tableName"`
+ IndexId int64 `json:"indexId"`
+ IndexName string `json:"indexName"`
+}
+
+func NewDropRollupFromJson(data string) (*DropRollup, error) {
+ var dropRollup DropRollup
+ err := json.Unmarshal([]byte(data), &dropRollup)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "unmarshal drop rollup error")
+ }
+
+ if dropRollup.TableId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "invalid drop rollup, table id not found")
+ }
+
+ if dropRollup.TableName == "" {
+ return nil, xerror.Errorf(xerror.Normal, "invalid drop rollup, tableName is empty")
+ }
+
+ if dropRollup.IndexName == "" {
+ return nil, xerror.Errorf(xerror.Normal, "invalid drop rollup, indexName is empty")
+ }
+
+ return &dropRollup, nil
+}
+
+func (d *DropRollup) String() string {
+ return fmt.Sprintf("DropRollup{DbId: %d, TableId: %d, TableName: %s, IndexId: %d, IndexName: %s}",
+ d.DbId, d.TableId, d.TableName, d.IndexId, d.IndexName)
+}
diff --git a/pkg/ccr/record/drop_table.go b/pkg/ccr/record/drop_table.go
index 05e4d96c..b61d0eff 100644
--- a/pkg/ccr/record/drop_table.go
+++ b/pkg/ccr/record/drop_table.go
@@ -11,6 +11,7 @@ type DropTable struct {
DbId int64 `json:"dbId"`
TableId int64 `json:"tableId"`
TableName string `json:"tableName"`
+ IsView bool `json:"isView"`
RawSql string `json:"rawSql"`
}
@@ -30,5 +31,5 @@ func NewDropTableFromJson(data string) (*DropTable, error) {
// Stringer, all fields
func (c *DropTable) String() string {
- return fmt.Sprintf("DropTable: DbId: %d, TableId: %d, TableName: %s, RawSql: %s", c.DbId, c.TableId, c.TableName, c.RawSql)
+ return fmt.Sprintf("DropTable: DbId: %d, TableId: %d, TableName: %s, IsView: %t, RawSql: %s", c.DbId, c.TableId, c.TableName, c.IsView, c.RawSql)
}
diff --git a/pkg/ccr/record/index.go b/pkg/ccr/record/index.go
new file mode 100644
index 00000000..94714957
--- /dev/null
+++ b/pkg/ccr/record/index.go
@@ -0,0 +1,58 @@
+package record
+
+const (
+ INDEX_TYPE_BITMAP = "BITMAP"
+ INDEX_TYPE_INVERTED = "INVERTED"
+ INDEX_TYPE_BLOOMFILTER = "BLOOMFILTER"
+ INDEX_TYPE_NGRAM_BF = "NGRAM_BF"
+)
+
+type Index struct {
+ IndexId int64 `json:"indexId"`
+ IndexName string `json:"indexName"`
+ Columns []string `json:"columns"`
+ IndexType string `json:"indexType"`
+ Properties map[string]string `json:"properties"`
+ Comment string `json:"comment"`
+ ColumnUniqueIds []int `json:"columnUniqueIds"`
+
+ IndexIdAlternative int64 `json:"i"`
+ IndexNameAlternative string `json:"in"`
+ ColumnsAlternative []string `json:"c"`
+ IndexTypeAlternative string `json:"it"`
+ PropertiesAlternative map[string]string `json:"pt"`
+ CommentAlternative string `json:"ct"`
+ ColumnUniqueIdsAlternative []int `json:"cui"`
+}
+
+func (index *Index) GetIndexName() string {
+ if index.IndexName != "" {
+ return index.IndexName
+ }
+ return index.IndexNameAlternative
+}
+
+func (index *Index) GetColumns() []string {
+ if len(index.Columns) > 0 {
+ return index.Columns
+ }
+ return index.ColumnsAlternative
+}
+
+func (index *Index) GetComment() string {
+ if index.Comment != "" {
+ return index.Comment
+ }
+ return index.CommentAlternative
+}
+
+func (index *Index) GetIndexType() string {
+ if index.IndexType != "" {
+ return index.IndexType
+ }
+ return index.IndexTypeAlternative
+}
+
+func (index *Index) IsInvertedIndex() bool {
+ return index.GetIndexType() == INDEX_TYPE_INVERTED
+}
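
The duplicated short JSON tags ("i", "in", "c", ...) presumably cover an alternative serialization of the same index metadata, and the Get* helpers fall back to them; a hedged sketch with made-up payloads (assumes "encoding/json", "fmt" and this record package):

func exampleIndexFallback() {
	longForm := `{"indexId":1,"indexName":"idx_k1","columns":["k1"],"indexType":"INVERTED"}`
	shortForm := `{"i":1,"in":"idx_k1","c":["k1"],"it":"INVERTED"}`
	for _, data := range []string{longForm, shortForm} {
		var idx record.Index
		if err := json.Unmarshal([]byte(data), &idx); err != nil {
			fmt.Println("unmarshal index failed:", err)
			continue
		}
		fmt.Println(idx.GetIndexName(), idx.GetColumns(), idx.IsInvertedIndex()) // idx_k1 [k1] true
	}
}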
diff --git a/pkg/ccr/record/index_change_job.go b/pkg/ccr/record/index_change_job.go
new file mode 100644
index 00000000..979e1673
--- /dev/null
+++ b/pkg/ccr/record/index_change_job.go
@@ -0,0 +1,59 @@
+package record
+
+import (
+ "encoding/json"
+
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+)
+
+const (
+ INDEX_CHANGE_JOB_STATE_RUNNING = "RUNNING"
+ INDEX_CHANGE_JOB_STATE_FINISHED = "FINISHED"
+ INDEX_CHANGE_JOB_STATE_CANCELLED = "CANCELLED"
+ INDEX_CHANGE_JOB_STATE_WAITING_TXN = "WATING_TXN"
+)
+
+type IndexChangeJob struct {
+ DbId int64 `json:"dbId"`
+ TableId int64 `json:"tableId"`
+ TableName string `json:"tableName"`
+ PartitionId int64 `json:"partitionId"`
+ PartitionName string `json:"partitionName"`
+ JobState string `json:"jobState"`
+ ErrMsg string `json:"errMsg"`
+ CreateTimeMs int64 `json:"createTimeMs"`
+ FinishedTimeMs int64 `json:"finishedTimeMs"`
+ IsDropOp bool `json:"isDropOp"`
+ OriginIndexId int64 `json:"originIndexId"`
+ TimeoutMs int64 `json:"timeoutMs"`
+ Indexes []Index `json:"alterInvertedIndexes"`
+}
+
+func NewIndexChangeJobFromJson(data string) (*IndexChangeJob, error) {
+ m := &IndexChangeJob{}
+ if err := json.Unmarshal([]byte(data), m); err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "unmarshal index change job error")
+ }
+
+ if m.TableId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "index change job table id not found")
+ }
+
+ if m.PartitionId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "index change job partition id not found")
+ }
+
+ if m.JobState == "" {
+ return nil, xerror.Errorf(xerror.Normal, "index change job state not found")
+ }
+
+ if len(m.Indexes) == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "index change job alter inverted indexes is empty")
+ }
+
+ if !m.IsDropOp && len(m.Indexes) != 1 {
+ return nil, xerror.Errorf(xerror.Normal, "index change job alter inverted indexes length is not 1")
+ }
+
+ return m, nil
+}
diff --git a/pkg/ccr/record/modify_comment.go b/pkg/ccr/record/modify_comment.go
new file mode 100644
index 00000000..4fc60f2e
--- /dev/null
+++ b/pkg/ccr/record/modify_comment.go
@@ -0,0 +1,35 @@
+package record
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+)
+
+type ModifyComment struct {
+ Type string `json:"type"`
+ DbId int64 `json:"dbId"`
+ TblId int64 `json:"tblId"`
+ ColToComment map[string]string `json:"colToComment"`
+ TblComment string `json:"tblComment"`
+}
+
+func NewModifyCommentFromJson(data string) (*ModifyComment, error) {
+ var modifyComment ModifyComment
+ err := json.Unmarshal([]byte(data), &modifyComment)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "unmarshal modify comment error")
+ }
+
+ if modifyComment.TblId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "table id not found")
+ }
+
+ return &modifyComment, nil
+}
+
+// Stringer
+func (r *ModifyComment) String() string {
+ return fmt.Sprintf("ModifyComment: Type: %s, DbId: %d, TblId: %d, ColToComment: %v, TblComment: %s", r.Type, r.DbId, r.TblId, r.ColToComment, r.TblComment)
+}
diff --git a/pkg/ccr/record/modify_table_add_or_drop_inverted_indices.go b/pkg/ccr/record/modify_table_add_or_drop_inverted_indices.go
new file mode 100644
index 00000000..6e99f68d
--- /dev/null
+++ b/pkg/ccr/record/modify_table_add_or_drop_inverted_indices.go
@@ -0,0 +1,45 @@
+package record
+
+import (
+ "encoding/json"
+ "strings"
+
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+)
+
+type ModifyTableAddOrDropInvertedIndices struct {
+ DbId int64 `json:"dbId"`
+ TableId int64 `json:"tableId"`
+ IsDropInvertedIndex bool `json:"isDropInvertedIndex"`
+ RawSql string `json:"rawSql"`
+ Indexes []Index `json:"indexes"`
+ AlternativeIndexes []Index `json:"alterInvertedIndexes"`
+}
+
+func NewModifyTableAddOrDropInvertedIndicesFromJson(data string) (*ModifyTableAddOrDropInvertedIndices, error) {
+ m := &ModifyTableAddOrDropInvertedIndices{}
+ if err := json.Unmarshal([]byte(data), m); err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "unmarshal modify table add or drop inverted indices error")
+ }
+
+ if m.RawSql == "" {
+ // TODO: fallback to create sql from other fields
+ return nil, xerror.Errorf(xerror.Normal, "modify table add or drop inverted indices sql is empty")
+ }
+
+ if m.TableId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "modify table add or drop inverted indices table id not found")
+ }
+
+ return m, nil
+}
+
+func (m *ModifyTableAddOrDropInvertedIndices) GetRawSql() string {
+ if strings.Contains(m.RawSql, "ALTER TABLE") && strings.Contains(m.RawSql, "INDEX") &&
+ !strings.Contains(m.RawSql, "DROP INDEX") && !strings.Contains(m.RawSql, "ADD INDEX") {
+ // fix the syntax error
+ // See apache/doris#44392 for details
+ return strings.ReplaceAll(m.RawSql, "INDEX", "ADD INDEX")
+ }
+ return m.RawSql
+}
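
A hedged example of the raw SQL fix-up above; the ALTER statement is an illustrative guess at the malformed binlog shape described in apache/doris#44392 (assumes "fmt" and this record package):

func exampleInvertedIndexRawSql() {
	m := &record.ModifyTableAddOrDropInvertedIndices{
		TableId: 1001,
		RawSql:  "ALTER TABLE t1 INDEX idx_k1 (k1) USING INVERTED",
	}
	fmt.Println(m.GetRawSql()) // ALTER TABLE t1 ADD INDEX idx_k1 (k1) USING INVERTED
}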
diff --git a/pkg/ccr/record/recover_info.go b/pkg/ccr/record/recover_info.go
new file mode 100644
index 00000000..6325dbd7
--- /dev/null
+++ b/pkg/ccr/record/recover_info.go
@@ -0,0 +1,50 @@
+package record
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+)
+
+type RecoverInfo struct {
+ DbId int64 `json:"dbId"`
+ NewDbName string `json:"newDbName"`
+ TableId int64 `json:"tableId"`
+ TableName string `json:"tableName"`
+ NewTableName string `json:"newTableName"`
+ PartitionId int64 `json:"partitionId"`
+ PartitionName string `json:"partitionName"`
+ NewPartitionName string `json:"newPartitionName"`
+}
+
+func NewRecoverInfoFromJson(data string) (*RecoverInfo, error) {
+ var recoverInfo RecoverInfo
+ err := json.Unmarshal([]byte(data), &recoverInfo)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "unmarshal recover info error")
+ }
+
+ if recoverInfo.TableId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "table id not found")
+ }
+
+ // table name must exist; partition name is optional, so it is not checked
+ if recoverInfo.TableName == "" {
+ return nil, xerror.Errorf(xerror.Normal, "table name is empty")
+ }
+ return &recoverInfo, nil
+}
+
+func (c *RecoverInfo) IsRecoverTable() bool {
+ return c.PartitionName == "" || c.PartitionId == -1
+}
+
+// String
+func (c *RecoverInfo) String() string {
+ return fmt.Sprintf("RecoverInfo: DbId: %d, NewDbName: %s, TableId: %d, TableName: %s, NewTableName: %s, PartitionId: %d, PartitionName: %s, NewPartitionName: %s",
+ c.DbId, c.NewDbName, c.TableId, c.TableName, c.NewTableName, c.PartitionId, c.PartitionName, c.NewPartitionName)
+}
diff --git a/pkg/ccr/record/rename_column.go b/pkg/ccr/record/rename_column.go
new file mode 100644
index 00000000..ab1c5388
--- /dev/null
+++ b/pkg/ccr/record/rename_column.go
@@ -0,0 +1,35 @@
+package record
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+)
+
+type RenameColumn struct {
+ DbId int64 `json:"dbId"`
+ TableId int64 `json:"tableId"`
+ ColName string `json:"colName"`
+ NewColName string `json:"newColName"`
+ IndexIdToSchemaVersion map[int64]int32 `json:"indexIdToSchemaVersion"`
+}
+
+func NewRenameColumnFromJson(data string) (*RenameColumn, error) {
+ var renameColumn RenameColumn
+ err := json.Unmarshal([]byte(data), &renameColumn)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "unmarshal rename column error")
+ }
+
+ if renameColumn.TableId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "table id not found")
+ }
+
+ return &renameColumn, nil
+}
+
+// Stringer
+func (r *RenameColumn) String() string {
+ return fmt.Sprintf("RenameColumn: DbId: %d, TableId: %d, ColName: %s, NewColName: %s, IndexIdToSchemaVersion: %v", r.DbId, r.TableId, r.ColName, r.NewColName, r.IndexIdToSchemaVersion)
+}
diff --git a/pkg/ccr/record/rename_partition.go b/pkg/ccr/record/rename_partition.go
new file mode 100644
index 00000000..1ab9bb35
--- /dev/null
+++ b/pkg/ccr/record/rename_partition.go
@@ -0,0 +1,44 @@
+package record
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+)
+
+type RenamePartition struct {
+ DbId int64 `json:"db"`
+ TableId int64 `json:"tb"`
+ PartitionId int64 `json:"p"`
+ NewPartitionName string `json:"nP"`
+ OldPartitionName string `json:"oP"`
+}
+
+func NewRenamePartitionFromJson(data string) (*RenamePartition, error) {
+ var rename RenamePartition
+ err := json.Unmarshal([]byte(data), &rename)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "unmarshal rename partition record error")
+ }
+
+ if rename.TableId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "rename partition record table id not found")
+ }
+
+ if rename.PartitionId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "rename partition record partition id not found")
+ }
+
+ if rename.NewPartitionName == "" {
+ return nil, xerror.Errorf(xerror.Normal, "rename partition record new partition name not found")
+ }
+
+ return &rename, nil
+}
+
+// Stringer
+func (r *RenamePartition) String() string {
+ return fmt.Sprintf("RenamePartition: DbId: %d, TableId: %d, PartitionId: %d, NewPartitionName: %s, OldPartitionName: %s",
+ r.DbId, r.TableId, r.PartitionId, r.NewPartitionName, r.OldPartitionName)
+}
diff --git a/pkg/ccr/record/rename_rollup.go b/pkg/ccr/record/rename_rollup.go
new file mode 100644
index 00000000..c5eb011d
--- /dev/null
+++ b/pkg/ccr/record/rename_rollup.go
@@ -0,0 +1,40 @@
+package record
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+)
+
+type RenameRollup struct {
+ DbId int64 `json:"db"`
+ TableId int64 `json:"tb"`
+ IndexId int64 `json:"ind"`
+ NewRollupName string `json:"nR"`
+ OldRollupName string `json:"oR"`
+}
+
+func NewRenameRollupFromJson(data string) (*RenameRollup, error) {
+ var record RenameRollup
+ err := json.Unmarshal([]byte(data), &record)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "unmarshal rename rollup record error")
+ }
+
+ if record.TableId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "rename rollup record table id not found")
+ }
+
+ if record.NewRollupName == "" {
+ return nil, xerror.Errorf(xerror.Normal, "rename rollup record old rollup name not found")
+ }
+
+ return &record, nil
+}
+
+// Stringer
+func (r *RenameRollup) String() string {
+ return fmt.Sprintf("RenameRollup: DbId: %d, TableId: %d, IndexId: %d, NewRollupName: %s, OldRollupName: %s",
+ r.DbId, r.TableId, r.IndexId, r.NewRollupName, r.OldRollupName)
+}
diff --git a/pkg/ccr/record/rename_table.go b/pkg/ccr/record/rename_table.go
new file mode 100644
index 00000000..1905133c
--- /dev/null
+++ b/pkg/ccr/record/rename_table.go
@@ -0,0 +1,40 @@
+package record
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+)
+
+type RenameTable struct {
+ DbId int64 `json:"db"`
+ TableId int64 `json:"tb"`
+ IndexId int64 `json:"ind"`
+ PartitionId int64 `json:"p"`
+ NewTableName string `json:"nT"`
+ OldTableName string `json:"oT"`
+ NewRollupName string `json:"nR"`
+ OldRollupName string `json:"oR"`
+ NewPartitionName string `json:"nP"`
+ OldPartitionName string `json:"oP"`
+}
+
+func NewRenameTableFromJson(data string) (*RenameTable, error) {
+ var renameTable RenameTable
+ err := json.Unmarshal([]byte(data), &renameTable)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "unmarshal rename table error")
+ }
+
+ if renameTable.TableId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "table id not found")
+ }
+
+ return &renameTable, nil
+}
+
+// Stringer
+func (r *RenameTable) String() string {
+ return fmt.Sprintf("RenameTable: DbId: %d, TableId: %d, PartitionId: %d, IndexId: %d, NewTableName: %s, OldTableName: %s, NewRollupName: %s, OldRollupName: %s, NewPartitionName: %s, OldPartitionName: %s", r.DbId, r.TableId, r.PartitionId, r.IndexId, r.NewTableName, r.OldTableName, r.NewRollupName, r.OldRollupName, r.NewPartitionName, r.OldPartitionName)
+}
diff --git a/pkg/ccr/record/replace_partition.go b/pkg/ccr/record/replace_partition.go
new file mode 100644
index 00000000..02b1bd90
--- /dev/null
+++ b/pkg/ccr/record/replace_partition.go
@@ -0,0 +1,40 @@
+package record
+
+import (
+ "encoding/json"
+
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+)
+
+type ReplacePartitionRecord struct {
+ DbId int64 `json:"dbId"`
+ DbName string `json:"dbName"`
+ TableId int64 `json:"tblId"`
+ TableName string `json:"tblName"`
+ Partitions []string `json:"partitions"`
+ TempPartitions []string `json:"tempPartitions"`
+ StrictRange bool `json:"strictRange"`
+ UseTempName bool `json:"useTempPartitionName"`
+}
+
+func NewReplacePartitionFromJson(data string) (*ReplacePartitionRecord, error) {
+ var replacePartition ReplacePartitionRecord
+ err := json.Unmarshal([]byte(data), &replacePartition)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "unmarshal replace partition error")
+ }
+
+ if len(replacePartition.TempPartitions) == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "the temp partitions of the replace partition record is empty")
+ }
+
+ if replacePartition.TableId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "table id not found")
+ }
+
+ if replacePartition.TableName == "" {
+ return nil, xerror.Errorf(xerror.Normal, "table name is empty")
+ }
+
+ return &replacePartition, nil
+}
diff --git a/pkg/ccr/record/replace_table.go b/pkg/ccr/record/replace_table.go
new file mode 100644
index 00000000..718ed348
--- /dev/null
+++ b/pkg/ccr/record/replace_table.go
@@ -0,0 +1,50 @@
+package record
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+)
+
+type ReplaceTableRecord struct {
+ DbId int64 `json:"dbId"`
+ OriginTableId int64 `json:"origTblId"`
+ OriginTableName string `json:"origTblName"`
+ NewTableId int64 `json:"newTblName"`
+ NewTableName string `json:"actualNewTblName"`
+ SwapTable bool `json:"swapTable"`
+ IsForce bool `json:"isForce"`
+}
+
+func NewReplaceTableRecordFromJson(data string) (*ReplaceTableRecord, error) {
+ record := &ReplaceTableRecord{}
+ err := json.Unmarshal([]byte(data), record)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "unmarshal replace table record error")
+ }
+
+ if record.OriginTableId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "table id of replace table record not found")
+ }
+
+ if record.OriginTableName == "" {
+ return nil, xerror.Errorf(xerror.Normal, "table name of replace table record not found")
+ }
+
+ if record.NewTableId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "new table id of replace table record not found")
+ }
+
+ if record.NewTableName == "" {
+ return nil, xerror.Errorf(xerror.Normal, "new table name of replace table record not found")
+ }
+
+ return record, nil
+}
+
+// Stringer
+func (r *ReplaceTableRecord) String() string {
+ return fmt.Sprintf("ReplaceTableRecord: DbId: %d, OriginTableId: %d, OriginTableName: %s, NewTableId: %d, NewTableName: %s, SwapTable: %v, IsForce: %v",
+ r.DbId, r.OriginTableId, r.OriginTableName, r.NewTableId, r.NewTableName, r.SwapTable, r.IsForce)
+}
diff --git a/pkg/ccr/record/restore_info.go b/pkg/ccr/record/restore_info.go
new file mode 100644
index 00000000..030d1af2
--- /dev/null
+++ b/pkg/ccr/record/restore_info.go
@@ -0,0 +1,26 @@
+package record
+
+import (
+ "encoding/json"
+
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+)
+
+type RestoreInfo struct {
+ DbId int64 `json:"dbId"`
+ DbName string `json:"dbName"`
+ TableInfo map[int64]string `json:"tableInfo"`
+}
+
+func NewRestoreInfoFromJson(data string) (*RestoreInfo, error) {
+ var restoreInfo RestoreInfo
+ err := json.Unmarshal([]byte(data), &restoreInfo)
+ if err != nil {
+ return nil, xerror.Wrap(err, xerror.Normal, "unmarshal restore info error")
+ }
+
+ if restoreInfo.DbId == 0 {
+ return nil, xerror.Errorf(xerror.Normal, "db id not found")
+ }
+ return &restoreInfo, nil
+}
diff --git a/pkg/ccr/record/truncate_table.go b/pkg/ccr/record/truncate_table.go
index 3bd9004d..c40c75eb 100644
--- a/pkg/ccr/record/truncate_table.go
+++ b/pkg/ccr/record/truncate_table.go
@@ -9,7 +9,7 @@ import (
// {
// "dbId": 10079,
-// "db": "default_cluster:ccr",
+// "db": "default_cluster:ccr", # "default_cluster:" prefix will be removed in Doris v2.1
// "tblId": 77395,
// "table": "src_1_alias",
// "isEntireTable": false,
diff --git a/pkg/ccr/record/upsert.go b/pkg/ccr/record/upsert.go
index ced175d8..fcfea4b5 100644
--- a/pkg/ccr/record/upsert.go
+++ b/pkg/ccr/record/upsert.go
@@ -11,10 +11,13 @@ type PartitionRecord struct {
Id int64 `json:"partitionId"`
Range string `json:"range"`
Version int64 `json:"version"`
+ IsTemp bool `json:"isTempPartition"`
+ Stid int64 `json:"stid"`
}
func (p PartitionRecord) String() string {
- return fmt.Sprintf("PartitionRecord{Id: %d, Range: %s, Version: %d}", p.Id, p.Range, p.Version)
+ return fmt.Sprintf("PartitionRecord{Id: %d, Range: %s, Version: %d, IsTemp: %v, Stid: %d}",
+ p.Id, p.Range, p.Version, p.IsTemp, p.Stid)
}
type TableRecord struct {
@@ -34,11 +37,12 @@ type Upsert struct {
Label string `json:"label"`
DbID int64 `json:"dbId"`
TableRecords map[int64]*TableRecord `json:"tableRecords"`
+ Stids []int64 `json:"stids"`
}
// Stringer
func (u Upsert) String() string {
- return fmt.Sprintf("Upsert{CommitSeq: %d, TxnID: %d, TimeStamp: %d, Label: %s, DbID: %d, TableRecords: %v}", u.CommitSeq, u.TxnID, u.TimeStamp, u.Label, u.DbID, u.TableRecords)
+ return fmt.Sprintf("Upsert{CommitSeq: %d, TxnID: %d, TimeStamp: %d, Label: %s, DbID: %d, TableRecords: %v, Stids: %v}", u.CommitSeq, u.TxnID, u.TimeStamp, u.Label, u.DbID, u.TableRecords, u.Stids)
}
// {
diff --git a/pkg/ccr/rpc_factory_mock.go b/pkg/ccr/rpc_factory_mock.go
index c8b44c5a..e4a58219 100644
--- a/pkg/ccr/rpc_factory_mock.go
+++ b/pkg/ccr/rpc_factory_mock.go
@@ -1,6 +1,10 @@
// Code generated by MockGen. DO NOT EDIT.
-// Source: rpc/rpc_factory.go
-
+// Source: pkg/rpc/rpc_factory.go
+//
+// Generated by this command:
+//
+// mockgen -source=pkg/rpc/rpc_factory.go -destination=pkg/ccr/rpc_factory_mock.go -package=ccr
+//
// Package ccr is a generated GoMock package.
package ccr
@@ -45,7 +49,7 @@ func (m *MockIRpcFactory) NewBeRpc(be *base.Backend) (rpc.IBeRpc, error) {
}
// NewBeRpc indicates an expected call of NewBeRpc.
-func (mr *MockIRpcFactoryMockRecorder) NewBeRpc(be interface{}) *gomock.Call {
+func (mr *MockIRpcFactoryMockRecorder) NewBeRpc(be any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBeRpc", reflect.TypeOf((*MockIRpcFactory)(nil).NewBeRpc), be)
}
@@ -60,7 +64,7 @@ func (m *MockIRpcFactory) NewFeRpc(spec *base.Spec) (rpc.IFeRpc, error) {
}
// NewFeRpc indicates an expected call of NewFeRpc.
-func (mr *MockIRpcFactoryMockRecorder) NewFeRpc(spec interface{}) *gomock.Call {
+func (mr *MockIRpcFactoryMockRecorder) NewFeRpc(spec any) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewFeRpc", reflect.TypeOf((*MockIRpcFactory)(nil).NewFeRpc), spec)
}
diff --git a/pkg/ccr/thrift_meta.go b/pkg/ccr/thrift_meta.go
new file mode 100644
index 00000000..ed64571d
--- /dev/null
+++ b/pkg/ccr/thrift_meta.go
@@ -0,0 +1,285 @@
+package ccr
+
+import (
+ "github.com/selectdb/ccr_syncer/pkg/ccr/base"
+ "github.com/selectdb/ccr_syncer/pkg/rpc"
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+
+ tstatus "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status"
+
+ "github.com/tidwall/btree"
+)
+
+var DefaultThriftMetaFactory ThriftMetaFactory = &defaultThriftMetaFactory{}
+
+type ThriftMetaFactory interface {
+ NewThriftMeta(spec *base.Spec, rpcFactory rpc.IRpcFactory, tableIds []int64) (*ThriftMeta, error)
+}
+
+type defaultThriftMetaFactory struct{}
+
+func (dtmf *defaultThriftMetaFactory) NewThriftMeta(spec *base.Spec, rpcFactory rpc.IRpcFactory, tableIds []int64) (*ThriftMeta, error) {
+ return NewThriftMeta(spec, rpcFactory, tableIds)
+}
+
+func NewThriftMeta(spec *base.Spec, rpcFactory rpc.IRpcFactory, tableIds []int64) (*ThriftMeta, error) {
+ meta := NewMeta(spec)
+ feRpc, err := rpcFactory.NewFeRpc(spec)
+ if err != nil {
+ return nil, err
+ }
+
+ // Step 1: get backends
+ backendMetaResp, err := feRpc.GetBackends(spec)
+ if err != nil {
+ return nil, err
+ }
+
+ if backendMetaResp.GetStatus().GetStatusCode() != tstatus.TStatusCode_OK {
+ return nil, xerror.Errorf(xerror.Meta, "get backend meta failed, status: %s", backendMetaResp.GetStatus())
+ }
+
+ if !backendMetaResp.IsSetBackends() {
+ return nil, xerror.New(xerror.Meta, "get backend meta failed, backend meta not set")
+ }
+
+ for _, backend := range backendMetaResp.GetBackends() {
+ backendMeta := &base.Backend{
+ Id: backend.GetId(),
+ Host: backend.GetHost(),
+ BePort: uint16(backend.GetBePort()),
+ HttpPort: uint16(backend.GetHttpPort()),
+ BrpcPort: uint16(backend.GetBrpcPort()),
+ }
+ meta.Backends[backendMeta.Id] = backendMeta
+ }
+
+ // Step 2: get table metas
+ tableMetaResp, err := feRpc.GetTableMeta(spec, tableIds)
+ if err != nil {
+ return nil, err
+ }
+
+ if tableMetaResp.GetStatus().GetStatusCode() != tstatus.TStatusCode_OK {
+ return nil, xerror.Errorf(xerror.Meta, "get table meta failed, status: %s", tableMetaResp.GetStatus())
+ }
+
+ if !tableMetaResp.IsSetDbMeta() {
+ return nil, xerror.New(xerror.Meta, "get table meta failed, db meta not set")
+ }
+
+ dbMeta := tableMetaResp.GetDbMeta()
+ for _, table := range dbMeta.GetTables() {
+ tableMeta := &TableMeta{
+ DatabaseMeta: &meta.DatabaseMeta,
+ Id: table.GetId(),
+ Name: table.GetName(),
+ PartitionIdMap: make(map[int64]*PartitionMeta),
+ PartitionRangeMap: make(map[string]*PartitionMeta),
+ }
+ meta.Id = dbMeta.GetId()
+ meta.Tables[tableMeta.Id] = tableMeta
+ meta.TableName2IdMap[tableMeta.Name] = tableMeta.Id
+
+ for _, partition := range table.GetPartitions() {
+ partitionMeta := &PartitionMeta{
+ TableMeta: tableMeta,
+ Id: partition.GetId(),
+ Name: partition.GetName(),
+ Range: partition.GetRange(),
+ VisibleVersion: partition.GetVisibleVersion(),
+ IndexIdMap: make(map[int64]*IndexMeta),
+ IndexNameMap: make(map[string]*IndexMeta),
+ }
+ tableMeta.PartitionIdMap[partitionMeta.Id] = partitionMeta
+ tableMeta.PartitionRangeMap[partitionMeta.Range] = partitionMeta
+
+ for _, index := range partition.GetIndexes() {
+ indexName := index.GetName()
+ isBaseIndex := indexName == tableMeta.Name // it is accurate, since lock is held
+ indexMeta := &IndexMeta{
+ PartitionMeta: partitionMeta,
+ Id: index.GetId(),
+ Name: indexName,
+ IsBaseIndex: isBaseIndex,
+ TabletMetas: btree.NewMap[int64, *TabletMeta](degree),
+ ReplicaMetas: btree.NewMap[int64, *ReplicaMeta](degree),
+ }
+ partitionMeta.IndexIdMap[indexMeta.Id] = indexMeta
+ partitionMeta.IndexNameMap[indexMeta.Name] = indexMeta
+ if tableMeta.Name == indexMeta.Name {
+ tableMeta.BaseIndexId = indexMeta.Id
+ }
+
+ for _, tablet := range index.GetTablets() {
+ tabletMeta := &TabletMeta{
+ IndexMeta: indexMeta,
+ Id: tablet.GetId(),
+ ReplicaMetas: btree.NewMap[int64, *ReplicaMeta](degree),
+ }
+ indexMeta.TabletMetas.Set(tabletMeta.Id, tabletMeta)
+
+ for _, replica := range tablet.GetReplicas() {
+ replicaMeta := &ReplicaMeta{
+ TabletMeta: tabletMeta,
+ Id: replica.GetId(),
+ TabletId: tabletMeta.Id,
+ BackendId: replica.GetBackendId(),
+ Version: replica.GetVersion(),
+ }
+ tabletMeta.ReplicaMetas.Set(replicaMeta.Id, replicaMeta)
+ indexMeta.ReplicaMetas.Set(replicaMeta.Id, replicaMeta)
+ }
+ }
+ }
+ }
+ }
+
+ droppedPartitions := make(map[int64]struct{})
+ for _, partition := range dbMeta.GetDroppedPartitions() {
+ droppedPartitions[partition] = struct{}{}
+ }
+ droppedTables := make(map[int64]struct{})
+ for _, table := range dbMeta.GetDroppedTables() {
+ droppedTables[table] = struct{}{}
+ }
+ droppedIndexes := make(map[int64]struct{})
+ for _, index := range dbMeta.GetDroppedIndexes() {
+ droppedIndexes[index] = struct{}{}
+ }
+
+ return &ThriftMeta{
+ meta: meta,
+ droppedPartitions: droppedPartitions,
+ droppedTables: droppedTables,
+ droppedIndexes: droppedIndexes,
+ }, nil
+}
+
+type ThriftMeta struct {
+ meta *Meta
+ droppedPartitions map[int64]struct{}
+ droppedTables map[int64]struct{}
+ droppedIndexes map[int64]struct{}
+}
+
+func (tm *ThriftMeta) GetTablets(tableId, partitionId, indexId int64) (*btree.Map[int64, *TabletMeta], error) {
+ dbId := tm.meta.Id
+
+ tableMeta, ok := tm.meta.Tables[tableId]
+ if !ok {
+ return nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d not found", dbId, tableId)
+ }
+
+ partitionMeta, ok := tableMeta.PartitionIdMap[partitionId]
+ if !ok {
+ return nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d, partitionId: %d not found", dbId, tableId, partitionId)
+ }
+
+ indexMeta, ok := partitionMeta.IndexIdMap[indexId]
+ if !ok {
+ return nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d, partitionId: %d, indexId: %d not found", dbId, tableId, partitionId, indexId)
+ }
+
+ return indexMeta.TabletMetas, nil
+}
+
+func (tm *ThriftMeta) GetPartitionIdByRange(tableId int64, partitionRange string) (int64, error) {
+ dbId := tm.meta.Id
+
+ tableMeta, ok := tm.meta.Tables[tableId]
+ if !ok {
+ return 0, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d not found", dbId, tableId)
+ }
+
+ partitionMeta, ok := tableMeta.PartitionRangeMap[partitionRange]
+ if !ok {
+ return 0, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d, partitionRange: %s not found", dbId, tableId, partitionRange)
+ }
+
+ return partitionMeta.Id, nil
+}
+
+func (tm *ThriftMeta) GetPartitionRangeMap(tableId int64) (map[string]*PartitionMeta, error) {
+ dbId := tm.meta.Id
+
+ tableMeta, ok := tm.meta.Tables[tableId]
+ if !ok {
+ return nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d not found", dbId, tableId)
+ }
+
+ return tableMeta.PartitionRangeMap, nil
+}
+
+func (tm *ThriftMeta) GetIndexIdMap(tableId, partitionId int64) (map[int64]*IndexMeta, error) {
+ dbId := tm.meta.Id
+
+ tableMeta, ok := tm.meta.Tables[tableId]
+ if !ok {
+ return nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d not found", dbId, tableId)
+ }
+
+ partitionMeta, ok := tableMeta.PartitionIdMap[partitionId]
+ if !ok {
+ return nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d, partitionId: %d not found", dbId, tableId, partitionId)
+ }
+
+ return partitionMeta.IndexIdMap, nil
+}
+
+func (tm *ThriftMeta) GetIndexNameMap(tableId, partitionId int64) (map[string]*IndexMeta, *IndexMeta, error) {
+ dbId := tm.meta.Id
+
+ tableMeta, ok := tm.meta.Tables[tableId]
+ if !ok {
+ return nil, nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d not found", dbId, tableId)
+ }
+
+ partitionMeta, ok := tableMeta.PartitionIdMap[partitionId]
+ if !ok {
+ return nil, nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d, partitionId: %d not found", dbId, tableId, partitionId)
+ }
+
+ baseIndex, ok := partitionMeta.IndexNameMap[tableMeta.Name]
+ if !ok {
+ return nil, nil, xerror.Errorf(xerror.Meta, "dbId: %d, tableId: %d, partitionId: %d, indexName: %s not found", dbId, tableId, partitionId, tableMeta.Name)
+ }
+
+ return partitionMeta.IndexNameMap, baseIndex, nil
+}
+
+func (tm *ThriftMeta) GetBackendMap() (map[int64]*base.Backend, error) {
+ if tm.meta.HostMapping == nil {
+ return tm.meta.Backends, nil
+ }
+
+ backends := make(map[int64]*base.Backend)
+ for id, backend := range tm.meta.Backends {
+ if host, ok := tm.meta.HostMapping[backend.Host]; ok {
+ backend.Host = host
+ } else {
+ return nil, xerror.Errorf(xerror.Normal,
+ "the public ip of host %s is not found, consider adding it via HTTP API /update_host_mapping", backend.Host)
+ }
+ backends[id] = backend
+ }
+ return backends, nil
+}
+
+// Whether the target partition is dropped
+func (tm *ThriftMeta) IsPartitionDropped(partitionId int64) bool {
+ _, ok := tm.droppedPartitions[partitionId]
+ return ok
+}
+
+// Whether the target table is dropped
+func (tm *ThriftMeta) IsTableDropped(tableId int64) bool {
+ _, ok := tm.droppedTables[tableId]
+ return ok
+}
+
+// Whether the target index is dropped
+func (tm *ThriftMeta) IsIndexDropped(indexId int64) bool {
+ _, ok := tm.droppedIndexes[indexId]
+ return ok
+}
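
A short hedged sketch of how this meta snapshot might be consulted before ingesting; the ids and the surrounding flow are placeholders, not the syncer's actual call site (assumes "fmt" and this ccr package):

func exampleThriftMeta(meta *ThriftMeta, tableId, partitionId, indexId int64) {
	if meta.IsTableDropped(tableId) || meta.IsPartitionDropped(partitionId) || meta.IsIndexDropped(indexId) {
		return // the upstream object is already dropped, nothing to ingest
	}
	if tablets, err := meta.GetTablets(tableId, partitionId, indexId); err == nil {
		fmt.Println("tablet count:", tablets.Len())
	}
}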
diff --git a/pkg/ccr/utils.go b/pkg/ccr/utils.go
index 6b9e919d..51a586c7 100644
--- a/pkg/ccr/utils.go
+++ b/pkg/ccr/utils.go
@@ -6,14 +6,58 @@ import (
"github.com/selectdb/ccr_syncer/pkg/xerror"
)
-func ExtractTableCommitSeqMap(data []byte) (map[int64]int64, error) {
- type JobInfo struct {
- TableCommitSeqMap map[int64]int64 `json:"table_commit_seq_map"`
- }
- var jobInfo JobInfo
+type BackupViewInfo struct {
+ Id int64 `json:"id"`
+ Name string `json:"name"`
+}
+
+type BackupOlapTableInfo struct {
+ Id int64 `json:"id"`
+}
+
+type NewBackupObject struct {
+ Views []BackupViewInfo `json:"views"`
+}
+
+type BackupJobInfo struct {
+ TableCommitSeqMap map[int64]int64 `json:"table_commit_seq_map"`
+ BackupObjects map[string]BackupOlapTableInfo `json:"backup_objects"`
+ NewBackupObjects *NewBackupObject `json:"new_backup_objects"`
+}
+func NewBackupJobInfoFromJson(data []byte) (*BackupJobInfo, error) {
+ jobInfo := &BackupJobInfo{}
if err := json.Unmarshal(data, &jobInfo); err != nil {
return nil, xerror.Wrapf(err, xerror.Normal, "unmarshal job info error: %v", err)
}
- return jobInfo.TableCommitSeqMap, nil
+ return jobInfo, nil
+}
+
+func (i *BackupJobInfo) TableNameMapping() map[int64]string {
+ tableMapping := make(map[int64]string)
+ for tableName, tableInfo := range i.BackupObjects {
+ tableMapping[tableInfo.Id] = tableName
+ }
+ return tableMapping
+}
+
+// Get the table id by table name, return -1 if not found
+func (i *BackupJobInfo) TableId(name string) int64 {
+ if tableInfo, ok := i.BackupObjects[name]; ok {
+ return tableInfo.Id
+ }
+
+ return -1
+}
+
+func (i *BackupJobInfo) Views() []string {
+ if i.NewBackupObjects == nil {
+ return []string{}
+ }
+
+ views := make([]string, 0)
+ for _, viewInfo := range i.NewBackupObjects.Views {
+ views = append(views, viewInfo.Name)
+ }
+ return views
}
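
A hedged usage sketch of the refactored backup job info; the snapshot JSON literal is fabricated for illustration and far smaller than a real one (assumes "fmt" and this ccr package):

func exampleBackupJobInfo() {
	data := []byte(`{"table_commit_seq_map":{"101":7},"backup_objects":{"t1":{"id":101}},"new_backup_objects":{"views":[{"id":201,"name":"v1"}]}}`)
	info, err := NewBackupJobInfoFromJson(data)
	if err != nil {
		fmt.Println("parse backup job info failed:", err)
		return
	}
	fmt.Println(info.TableId("t1"))      // 101
	fmt.Println(info.TableNameMapping()) // map[101:t1]
	fmt.Println(info.Views())            // [v1]
}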
diff --git a/pkg/rpc/Makefile b/pkg/rpc/Makefile
new file mode 100644
index 00000000..4b52d47e
--- /dev/null
+++ b/pkg/rpc/Makefile
@@ -0,0 +1,3 @@
+gen_thrift:
+ kitex -module github.com/selectdb/ccr_syncer thrift/FrontendService.thrift
+ kitex -module github.com/selectdb/ccr_syncer thrift/BackendService.thrift
diff --git a/pkg/rpc/be.go b/pkg/rpc/be.go
index cf8b012a..bec3d244 100644
--- a/pkg/rpc/be.go
+++ b/pkg/rpc/be.go
@@ -26,7 +26,8 @@ func (beRpc *BeRpc) IngestBinlog(req *bestruct.TIngestBinlogRequest) (*bestruct.
client := beRpc.client
if result, err := client.IngestBinlog(context.Background(), req); err != nil {
- return nil, xerror.Wrapf(err, xerror.Normal, "IngestBinlog error: %v", err)
+ return nil, xerror.Wrapf(err, xerror.Normal,
+ "IngestBinlog error: %v, txnId: %d, be: %v", err, req.GetTxnId(), beRpc.backend)
} else {
return result, nil
}
diff --git a/pkg/rpc/concurrency.go b/pkg/rpc/concurrency.go
new file mode 100644
index 00000000..d10bd827
--- /dev/null
+++ b/pkg/rpc/concurrency.go
@@ -0,0 +1,72 @@
+package rpc
+
+import (
+ "flag"
+ "sync"
+)
+
+var (
+ FlagMaxIngestConcurrencyPerBackend int64
+)
+
+func init() {
+ flag.Int64Var(&FlagMaxIngestConcurrencyPerBackend, "max_ingest_concurrency_per_backend", 48,
+ "The max concurrency of the binlog ingesting per backend")
+}
+
+type ConcurrencyWindow struct {
+ mu *sync.Mutex
+ cond *sync.Cond
+
+ id int64
+ inflights int64
+}
+
+func newConcurrencyWindow(id int64) *ConcurrencyWindow {
+ mu := &sync.Mutex{}
+ return &ConcurrencyWindow{
+ mu: mu,
+ cond: sync.NewCond(mu),
+ id: id,
+ inflights: 0,
+ }
+}
+
+func (cw *ConcurrencyWindow) Acquire() {
+ cw.mu.Lock()
+ defer cw.mu.Unlock()
+
+ for cw.inflights+1 > FlagMaxIngestConcurrencyPerBackend {
+ cw.cond.Wait()
+ }
+ cw.inflights += 1
+}
+
+func (cw *ConcurrencyWindow) Release() {
+ cw.mu.Lock()
+ defer cw.mu.Unlock()
+
+ if cw.inflights == 0 {
+ return
+ }
+
+ cw.inflights -= 1
+ cw.cond.Signal()
+}
+
+type ConcurrencyManager struct {
+ windows sync.Map
+}
+
+func NewConcurrencyManager() *ConcurrencyManager {
+ return &ConcurrencyManager{}
+}
+
+func (cm *ConcurrencyManager) GetWindow(id int64) *ConcurrencyWindow {
+ value, ok := cm.windows.Load(id)
+ if !ok {
+ window := newConcurrencyWindow(id)
+ value, _ = cm.windows.LoadOrStore(id, window)
+ }
+ return value.(*ConcurrencyWindow)
+}
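
A hedged sketch of how a per-backend window could wrap an ingest call; beId and the ingest callback are placeholders, not the syncer's actual call site (assumes this rpc package):

func ingestWithWindow(cm *ConcurrencyManager, beId int64, ingest func() error) error {
	window := cm.GetWindow(beId)
	window.Acquire() // blocks once max_ingest_concurrency_per_backend requests are in flight
	defer window.Release()
	return ingest()
}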
diff --git a/pkg/rpc/fe.go b/pkg/rpc/fe.go
index 62c02901..01217118 100644
--- a/pkg/rpc/fe.go
+++ b/pkg/rpc/fe.go
@@ -2,34 +2,472 @@ package rpc
import (
"context"
-
- "github.com/selectdb/ccr_syncer/pkg/ccr/base"
- "github.com/selectdb/ccr_syncer/pkg/xerror"
+ "errors"
+ "flag"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
festruct "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
feservice "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice/frontendservice"
+ tstatus "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status"
festruct_types "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types"
+ "github.com/selectdb/ccr_syncer/pkg/utils"
+ "github.com/selectdb/ccr_syncer/pkg/xerror"
+ "github.com/cloudwego/kitex/client"
+ "github.com/cloudwego/kitex/client/callopt"
+ "github.com/cloudwego/kitex/pkg/kerrors"
+ "github.com/selectdb/ccr_syncer/pkg/ccr/base"
log "github.com/sirupsen/logrus"
)
-const (
- LOCAL_REPO_NAME = ""
+var (
+ localRepoName string
+ commitTxnTimeout time.Duration
+ connectTimeout time.Duration
+ rpcTimeout time.Duration
)
+var ErrFeNotMasterCompatible = xerror.NewWithoutStack(xerror.FE, "not master compatible")
+
+func init() {
+ flag.StringVar(&localRepoName, "local_repo_name", "", "local_repo_name")
+ flag.DurationVar(&commitTxnTimeout, "commit_txn_timeout", 33*time.Second, "commit_txn_timeout")
+ flag.DurationVar(&connectTimeout, "connect_timeout", 10*time.Second, "connect timeout")
+ flag.DurationVar(&rpcTimeout, "rpc_timeout", 30*time.Second, "rpc timeout")
+}
+
+// canUseNextAddr reports whether the next addr can be tried, i.e. err is a connection-level error rather than a method-not-found or other application error
+func canUseNextAddr(err error) bool {
+ if errors.Is(err, kerrors.ErrNoConnection) {
+ return true
+ }
+ if errors.Is(err, kerrors.ErrNoResolver) {
+ return true
+ }
+ if errors.Is(err, kerrors.ErrNoDestAddress) {
+ return true
+ }
+ if errors.Is(err, kerrors.ErrRemoteOrNetwork) {
+ return true
+ }
+
+ errMsg := err.Error()
+ if strings.Contains(errMsg, "connection has been closed by peer") {
+ return true
+ }
+ if strings.Contains(errMsg, "closed network connection") {
+ return true
+ }
+ if strings.Contains(errMsg, "connection reset by peer") {
+ return true
+ }
+ if strings.Contains(errMsg, "connection reset by peer") {
+ return true
+ }
+
+ return false
+}
+
+type RestoreSnapshotRequest struct {
+ TableRefs []*festruct.TTableRef
+ SnapshotName string
+ SnapshotResult *festruct.TGetSnapshotResult_
+ AtomicRestore bool
+ CleanPartitions bool
+ CleanTables bool
+ Compress bool
+}
+
type IFeRpc interface {
BeginTransaction(*base.Spec, string, []int64) (*festruct.TBeginTxnResult_, error)
+ BeginTransactionForTxnInsert(*base.Spec, string, []int64, int64) (*festruct.TBeginTxnResult_, error)
CommitTransaction(*base.Spec, int64, []*festruct_types.TTabletCommitInfo) (*festruct.TCommitTxnResult_, error)
+ CommitTransactionForTxnInsert(*base.Spec, int64, bool, []*festruct.TSubTxnInfo) (*festruct.TCommitTxnResult_, error)
RollbackTransaction(spec *base.Spec, txnId int64) (*festruct.TRollbackTxnResult_, error)
GetBinlog(*base.Spec, int64) (*festruct.TGetBinlogResult_, error)
GetBinlogLag(*base.Spec, int64) (*festruct.TGetBinlogLagResult_, error)
- GetSnapshot(*base.Spec, string) (*festruct.TGetSnapshotResult_, error)
- RestoreSnapshot(*base.Spec, []*festruct.TTableRef, string, *festruct.TGetSnapshotResult_) (*festruct.TRestoreSnapshotResult_, error)
- GetMasterToken(*base.Spec) (string, error)
+ GetSnapshot(*base.Spec, string, bool) (*festruct.TGetSnapshotResult_, error)
+ RestoreSnapshot(*base.Spec, *RestoreSnapshotRequest) (*festruct.TRestoreSnapshotResult_, error)
+ GetMasterToken(*base.Spec) (*festruct.TGetMasterTokenResult_, error)
+ GetDbMeta(spec *base.Spec) (*festruct.TGetMetaResult_, error)
+ GetTableMeta(spec *base.Spec, tableIds []int64) (*festruct.TGetMetaResult_, error)
+ GetBackends(spec *base.Spec) (*festruct.TGetBackendMetaResult_, error)
+
+ Address() string
}
type FeRpc struct {
- client feservice.Client
+ spec *base.Spec
+ masterClient IFeRpc
+ clients map[string]IFeRpc
+ cachedFeAddrs map[string]bool
+ lock sync.RWMutex // for get client
+}
+
+func NewFeRpc(spec *base.Spec) (*FeRpc, error) {
+ addr := fmt.Sprintf("%s:%s", spec.Host, spec.ThriftPort)
+ client, err := newSingleFeClient(addr)
+ if err != nil {
+ return nil, xerror.Wrapf(err, xerror.RPC, "NewFeClient error: %v", err)
+ }
+
+ clients := make(map[string]IFeRpc)
+ clients[client.Address()] = client
+ cachedFeAddrs := make(map[string]bool)
+ for _, fe := range spec.Frontends {
+ addr := fmt.Sprintf("%s:%s", fe.Host, fe.ThriftPort)
+
+ if _, ok := cachedFeAddrs[addr]; ok {
+ continue
+ }
+
+ // cache a client for every FE listed in the spec
+ if client, err := newSingleFeClient(addr); err != nil {
+ log.Warnf("new fe client error: %+v", err)
+ } else {
+ clients[client.Address()] = client
+ }
+ cachedFeAddrs[addr] = true
+ }
+
+ return &FeRpc{
+ spec: spec,
+ masterClient: client,
+ clients: clients,
+ cachedFeAddrs: cachedFeAddrs,
+ }, nil
+}
+
+// get all fe addrs
+// "[masterAddr],otherCachedFeAddrs" => "[127.0.0.1:1000],127.0.1:1001,127.0.1:1002"
+func (rpc *FeRpc) Address() string {
+ cachedFeAddrs := rpc.getCacheFeAddrs()
+ masterClient := rpc.getMasterClient()
+
+ var addrBuilder strings.Builder
+ addrBuilder.WriteString(fmt.Sprintf("[%s]", masterClient.Address()))
+ delete(cachedFeAddrs, masterClient.Address())
+ for addr := range cachedFeAddrs {
+ addrBuilder.WriteString(",")
+ addrBuilder.WriteString(addr)
+ }
+ return addrBuilder.String()
+}
+
+type resultType interface {
+ GetStatus() *tstatus.TStatus
+ IsSetMasterAddress() bool
+ GetMasterAddress() *festruct_types.TNetworkAddress
+}
+type callerType func(client IFeRpc) (resultType, error)
+
+func (rpc *FeRpc) getMasterClient() IFeRpc {
+ rpc.lock.RLock()
+ defer rpc.lock.RUnlock()
+
+ return rpc.masterClient
+}
+
+func (rpc *FeRpc) updateMasterClient(masterClient IFeRpc) {
+ rpc.lock.Lock()
+ defer rpc.lock.Unlock()
+
+ rpc.clients[masterClient.Address()] = masterClient
+ rpc.masterClient = masterClient
+}
+
+func (rpc *FeRpc) getClient(addr string) (IFeRpc, bool) {
+ rpc.lock.RLock()
+ defer rpc.lock.RUnlock()
+
+ client, ok := rpc.clients[addr]
+ return client, ok
+}
+
+func (rpc *FeRpc) addClient(client IFeRpc) {
+ rpc.lock.Lock()
+ defer rpc.lock.Unlock()
+
+ rpc.clients[client.Address()] = client
+}
+
+func (rpc *FeRpc) getClients() map[string]IFeRpc {
+ rpc.lock.RLock()
+ defer rpc.lock.RUnlock()
+
+ return utils.CopyMap(rpc.clients)
+}
+
+func (rpc *FeRpc) getCacheFeAddrs() map[string]bool {
+ rpc.lock.RLock()
+ defer rpc.lock.RUnlock()
+
+ return utils.CopyMap(rpc.cachedFeAddrs)
+}
+
+type retryWithMasterRedirectAndCachedClientsRpc struct {
+ rpc *FeRpc
+ caller callerType
+ notriedClients map[string]IFeRpc
+}
+
+type call0Result struct {
+ canUseNextAddr bool
+ resp resultType
+ err error
+ masterAddr string
+}
+
+func (r *retryWithMasterRedirectAndCachedClientsRpc) call0(masterClient IFeRpc) *call0Result {
+ caller := r.caller
+ resp, err := caller(masterClient)
+ log.Tracef("call resp: %.128v, error: %+v", resp, err)
+
+ // Step 1: check error
+ if err != nil {
+ if !canUseNextAddr(err) {
+ return &call0Result{
+ canUseNextAddr: false,
+ err: xerror.Wrap(err, xerror.FE, "thrift error"),
+ }
+ } else {
+ log.Warnf("call error: %+v, try next addr", err)
+ return &call0Result{
+ canUseNextAddr: true,
+ err: xerror.Wrap(err, xerror.FE, "thrift error"),
+ }
+ }
+ }
+
+ // Step 2: check need redirect
+ if resp.GetStatus().GetStatusCode() != tstatus.TStatusCode_NOT_MASTER {
+ return &call0Result{
+ canUseNextAddr: false,
+ resp: resp,
+ err: nil,
+ }
+ }
+
+ // not compatible: the FE does not report the master address
+ if !resp.IsSetMasterAddress() {
+ err = xerror.XPanicWrapf(ErrFeNotMasterCompatible, "fe addr [%s]", masterClient.Address())
+ return &call0Result{
+ canUseNextAddr: true,
+ err: err, // not nil
+ }
+ }
+
+ // switch to master
+ masterAddr := resp.GetMasterAddress()
+ err = xerror.Errorf(xerror.FE, "addr [%s] is not master", masterAddr)
+
+ // convert private ip to public ip, if needed
+ hostname := masterAddr.Hostname
+ if r.rpc.spec.HostMapping != nil {
+ if host, ok := r.rpc.spec.HostMapping[hostname]; ok {
+ hostname = host
+ } else {
+ return &call0Result{
+ canUseNextAddr: true,
+ err: xerror.Errorf(xerror.Normal,
+ "the public ip of %s is not found, consider adding it via HTTP API /update_host_mapping", hostname),
+ }
+ }
+ }
+
+ return &call0Result{
+ canUseNextAddr: true,
+ resp: resp,
+ masterAddr: fmt.Sprintf("%s:%d", hostname, masterAddr.Port),
+ err: err, // not nil
+ }
+}
+
+func (r *retryWithMasterRedirectAndCachedClientsRpc) call() (resultType, error) {
+ rpc := r.rpc
+ masterClient := rpc.masterClient
+
+ // Step 1: try master
+ result := r.call0(masterClient)
+ log.Tracef("call0 result: %+v", result)
+ if result.err == nil {
+ return result.resp, nil
+ }
+
+ // Step 2: check error, if can't use next addr, return error
+ // canUseNextAddr means another addr may be tried: connection-level errors such as ErrNoConnection, ErrNoResolver, ErrNoDestAddress => (follow the FE redirect || use the next cached addr)
+ if !result.canUseNextAddr {
+ return nil, result.err
+ }
+
+ // Step 3: if set master addr, redirect to master
+ // redirect to master
+ if result.masterAddr != "" {
+ masterAddr := result.masterAddr
+ log.Infof("switch to master %s", masterAddr)
+
+ var err error
+ client, ok := rpc.getClient(masterAddr)
+ if ok {
+ masterClient = client
+ } else {
+ masterClient, err = newSingleFeClient(masterAddr)
+ if err != nil {
+ return nil, xerror.Wrapf(err, xerror.RPC, "NewFeClient [%s] error: %v", masterAddr, err)
+ }
+ }
+ rpc.updateMasterClient(masterClient)
+ return r.call()
+ }
+
+ // Step 4: try all cached fe clients
+ if r.notriedClients == nil {
+ r.notriedClients = rpc.getClients()
+ }
+ delete(r.notriedClients, masterClient.Address())
+ if len(r.notriedClients) == 0 {
+ return nil, result.err
+ }
+ // get first notried client
+ var client IFeRpc
+ for _, client = range r.notriedClients {
+ break
+ }
+ // call0 failed, so the original masterClient is no longer the master; use this client as masterClient for the retry
+ rpc.updateMasterClient(client)
+ return r.call()
+}
+
+func (rpc *FeRpc) callWithMasterRedirect(caller callerType) (resultType, error) {
+ r := &retryWithMasterRedirectAndCachedClientsRpc{
+ rpc: rpc,
+ caller: caller,
+ }
+ return r.call()
+}
+
+func convertResult[T any](result any, err error) (*T, error) {
+ if result == nil {
+ return nil, err
+ }
+
+ return result.(*T), err
+}
+
+func (rpc *FeRpc) BeginTransaction(spec *base.Spec, label string, tableIds []int64) (*festruct.TBeginTxnResult_, error) {
+ // return rpc.masterClient.BeginTransaction(spec, label, tableIds)
+ caller := func(client IFeRpc) (resultType, error) {
+ return client.BeginTransaction(spec, label, tableIds)
+ }
+ result, err := rpc.callWithMasterRedirect(caller)
+ return convertResult[festruct.TBeginTxnResult_](result, err)
+}
+
+func (rpc *FeRpc) BeginTransactionForTxnInsert(spec *base.Spec, label string, tableIds []int64, stidNum int64) (*festruct.TBeginTxnResult_, error) {
+ // return rpc.masterClient.BeginTransactionForTxnInsert(spec, label, tableIds, stidNum)
+ caller := func(client IFeRpc) (resultType, error) {
+ return client.BeginTransactionForTxnInsert(spec, label, tableIds, stidNum)
+ }
+ result, err := rpc.callWithMasterRedirect(caller)
+ return convertResult[festruct.TBeginTxnResult_](result, err)
+}
+
+func (rpc *FeRpc) CommitTransaction(spec *base.Spec, txnId int64, commitInfos []*festruct_types.TTabletCommitInfo) (*festruct.TCommitTxnResult_, error) {
+ // return rpc.masterClient.CommitTransaction(spec, txnId, commitInfos)
+ caller := func(client IFeRpc) (resultType, error) {
+ return client.CommitTransaction(spec, txnId, commitInfos)
+ }
+ result, err := rpc.callWithMasterRedirect(caller)
+ return convertResult[festruct.TCommitTxnResult_](result, err)
+}
+
+func (rpc *FeRpc) CommitTransactionForTxnInsert(spec *base.Spec, txnId int64, isTxnInsert bool, subTxnInfos []*festruct.TSubTxnInfo) (*festruct.TCommitTxnResult_, error) {
+ // return rpc.masterClient.CommitTransactionForTxnInsert(spec, txnId, commitInfos, subTxnInfos)
+ caller := func(client IFeRpc) (resultType, error) {
+ return client.CommitTransactionForTxnInsert(spec, txnId, isTxnInsert, subTxnInfos)
+ }
+ result, err := rpc.callWithMasterRedirect(caller)
+ return convertResult[festruct.TCommitTxnResult_](result, err)
+}
+
+func (rpc *FeRpc) RollbackTransaction(spec *base.Spec, txnId int64) (*festruct.TRollbackTxnResult_, error) {
+ // return rpc.masterClient.RollbackTransaction(spec, txnId)
+ caller := func(client IFeRpc) (resultType, error) {
+ return client.RollbackTransaction(spec, txnId)
+ }
+ result, err := rpc.callWithMasterRedirect(caller)
+ return convertResult[festruct.TRollbackTxnResult_](result, err)
+}
+
+func (rpc *FeRpc) GetBinlog(spec *base.Spec, commitSeq int64) (*festruct.TGetBinlogResult_, error) {
+ // return rpc.masterClient.GetBinlog(spec, commitSeq)
+ caller := func(client IFeRpc) (resultType, error) {
+ return client.GetBinlog(spec, commitSeq)
+ }
+ result, err := rpc.callWithMasterRedirect(caller)
+ return convertResult[festruct.TGetBinlogResult_](result, err)
+}
+
+func (rpc *FeRpc) GetBinlogLag(spec *base.Spec, commitSeq int64) (*festruct.TGetBinlogLagResult_, error) {
+ // return rpc.masterClient.GetBinlogLag(spec, commitSeq)
+ caller := func(client IFeRpc) (resultType, error) {
+ return client.GetBinlogLag(spec, commitSeq)
+ }
+ result, err := rpc.callWithMasterRedirect(caller)
+ return convertResult[festruct.TGetBinlogLagResult_](result, err)
+}
+
+func (rpc *FeRpc) GetSnapshot(spec *base.Spec, labelName string, compress bool) (*festruct.TGetSnapshotResult_, error) {
+ // return rpc.masterClient.GetSnapshot(spec, labelName)
+ caller := func(client IFeRpc) (resultType, error) {
+ return client.GetSnapshot(spec, labelName, compress)
+ }
+ result, err := rpc.callWithMasterRedirect(caller)
+ return convertResult[festruct.TGetSnapshotResult_](result, err)
+}
+
+func (rpc *FeRpc) RestoreSnapshot(spec *base.Spec, req *RestoreSnapshotRequest) (*festruct.TRestoreSnapshotResult_, error) {
+ caller := func(client IFeRpc) (resultType, error) {
+ return client.RestoreSnapshot(spec, req)
+ }
+ result, err := rpc.callWithMasterRedirect(caller)
+ return convertResult[festruct.TRestoreSnapshotResult_](result, err)
+}
+
+func (rpc *FeRpc) GetMasterToken(spec *base.Spec) (*festruct.TGetMasterTokenResult_, error) {
+ // return rpc.masterClient.GetMasterToken(spec)
+ caller := func(client IFeRpc) (resultType, error) {
+ return client.GetMasterToken(spec)
+ }
+ result, err := rpc.callWithMasterRedirect(caller)
+ return convertResult[festruct.TGetMasterTokenResult_](result, err)
+}
+
+func (rpc *FeRpc) GetDbMeta(spec *base.Spec) (*festruct.TGetMetaResult_, error) {
+ caller := func(client IFeRpc) (resultType, error) {
+ return client.GetDbMeta(spec)
+ }
+ result, err := rpc.callWithMasterRedirect(caller)
+ return convertResult[festruct.TGetMetaResult_](result, err)
+}
+
+func (rpc *FeRpc) GetTableMeta(spec *base.Spec, tableIds []int64) (*festruct.TGetMetaResult_, error) {
+ caller := func(client IFeRpc) (resultType, error) {
+ return client.GetTableMeta(spec, tableIds)
+ }
+ result, err := rpc.callWithMasterRedirect(caller)
+ return convertResult[festruct.TGetMetaResult_](result, err)
+}
+
+func (rpc *FeRpc) GetBackends(spec *base.Spec) (*festruct.TGetBackendMetaResult_, error) {
+ caller := func(client IFeRpc) (resultType, error) {
+ return client.GetBackends(spec)
+ }
+ result, err := rpc.callWithMasterRedirect(caller)
+ return convertResult[festruct.TGetBackendMetaResult_](result, err)
}
type Request interface {
@@ -46,6 +484,27 @@ func setAuthInfo[T Request](request T, spec *base.Spec) {
request.SetDb(&spec.Database)
}
+type singleFeClient struct {
+ addr string
+ client feservice.Client
+}
+
+func newSingleFeClient(addr string) (*singleFeClient, error) {
+ // create kitex FrontendService client
+ if fe_client, err := feservice.NewClient("FrontendService", client.WithHostPorts(addr), client.WithConnectTimeout(connectTimeout), client.WithRPCTimeout(rpcTimeout)); err != nil {
+ return nil, xerror.Wrapf(err, xerror.RPC, "NewFeClient error: %v, addr: %s", err, addr)
+ } else {
+ return &singleFeClient{
+ addr: addr,
+ client: fe_client,
+ }, nil
+ }
+}
+
+func (rpc *singleFeClient) Address() string {
+ return rpc.addr
+}
+
// begin transaction
//
// struct TBeginTxnRequest {
@@ -62,8 +521,8 @@ func setAuthInfo[T Request](request T, spec *base.Spec) {
// 10: optional Types.TUniqueId request_id
// 11: optional string token
// }
-func (rpc *FeRpc) BeginTransaction(spec *base.Spec, label string, tableIds []int64) (*festruct.TBeginTxnResult_, error) {
- log.Debugf("BeginTransaction spec: %s, label: %s, tableIds: %v", spec, label, tableIds)
+func (rpc *singleFeClient) BeginTransaction(spec *base.Spec, label string, tableIds []int64) (*festruct.TBeginTxnResult_, error) {
+ log.Debugf("Call BeginTransaction, addr: %s, spec: %s, label: %s, tableIds: %v", rpc.Address(), spec, label, tableIds)
client := rpc.client
req := &festruct.TBeginTxnRequest{
@@ -74,7 +533,26 @@ func (rpc *FeRpc) BeginTransaction(spec *base.Spec, label string, tableIds []int
log.Debugf("BeginTransaction user %s, label: %s, tableIds: %v", req.GetUser(), label, tableIds)
if result, err := client.BeginTxn(context.Background(), req); err != nil {
- return nil, xerror.Wrapf(err, xerror.Normal, "BeginTransaction error: %v, req: %+v", err, req)
+ return nil, xerror.Wrapf(err, xerror.RPC, "BeginTransaction error: %v, req: %+v", err, req)
+ } else {
+ return result, nil
+ }
+}
+
+func (rpc *singleFeClient) BeginTransactionForTxnInsert(spec *base.Spec, label string, tableIds []int64, stidNum int64) (*festruct.TBeginTxnResult_, error) {
+ log.Debugf("Call BeginTransactionForTxnInsert, addr: %s, spec: %s, label: %s, tableIds: %v", rpc.Address(), spec, label, tableIds)
+
+ client := rpc.client
+ req := &festruct.TBeginTxnRequest{
+ Label: &label,
+ }
+ setAuthInfo(req, spec)
+ req.TableIds = tableIds
+ req.SubTxnNum = stidNum
+
+ log.Debugf("BeginTransactionForTxnInsert user %s, label: %s, tableIds: %v", req.GetUser(), label, tableIds)
+ if result, err := client.BeginTxn(context.Background(), req); err != nil {
+ return nil, xerror.Wrapf(err, xerror.RPC, "BeginTransactionForTxnInsert error: %v, req: %+v", err, req)
} else {
return result, nil
}
@@ -94,8 +572,8 @@ func (rpc *FeRpc) BeginTransaction(spec *base.Spec, label string, tableIds []int
// 11: optional string token
// 12: optional i64 db_id
// }
-func (rpc *FeRpc) CommitTransaction(spec *base.Spec, txnId int64, commitInfos []*festruct_types.TTabletCommitInfo) (*festruct.TCommitTxnResult_, error) {
- log.Debugf("CommitTransaction spec: %s, txnId: %d, commitInfos: %v", spec, txnId, commitInfos)
+func (rpc *singleFeClient) CommitTransaction(spec *base.Spec, txnId int64, commitInfos []*festruct_types.TTabletCommitInfo) (*festruct.TCommitTxnResult_, error) {
+ log.Debugf("Call CommitTransaction, addr: %s spec: %s, txnId: %d, commitInfos: %v", rpc.Address(), spec, txnId, commitInfos)
client := rpc.client
req := &festruct.TCommitTxnRequest{}
@@ -103,8 +581,25 @@ func (rpc *FeRpc) CommitTransaction(spec *base.Spec, txnId int64, commitInfos []
req.TxnId = &txnId
req.CommitInfos = commitInfos
- if result, err := client.CommitTxn(context.Background(), req); err != nil {
- return nil, xerror.Wrapf(err, xerror.Normal, "CommitTransaction error: %v, req: %+v", err, req)
+ if result, err := client.CommitTxn(context.Background(), req, callopt.WithRPCTimeout(commitTxnTimeout)); err != nil {
+ return nil, xerror.Wrapf(err, xerror.RPC, "CommitTransaction error: %v, req: %+v", err, req)
+ } else {
+ return result, nil
+ }
+}
+
+func (rpc *singleFeClient) CommitTransactionForTxnInsert(spec *base.Spec, txnId int64, isTxnInsert bool, subTxnInfos []*festruct.TSubTxnInfo) (*festruct.TCommitTxnResult_, error) {
+ log.Debugf("Call CommitTransactionForTxnInsert, addr: %s spec: %s, txnId: %d, subTxnInfos: %v", rpc.Address(), spec, txnId, subTxnInfos)
+
+ client := rpc.client
+ req := &festruct.TCommitTxnRequest{}
+ setAuthInfo(req, spec)
+ req.TxnId = &txnId
+ req.TxnInsert = &isTxnInsert
+ req.SubTxnInfos = subTxnInfos
+
+ if result, err := client.CommitTxn(context.Background(), req, callopt.WithRPCTimeout(commitTxnTimeout)); err != nil {
+ return nil, xerror.Wrapf(err, xerror.RPC, "CommitTransactionForTxnInsert error: %v, req: %+v", err, req)
} else {
return result, nil
}
@@ -123,8 +618,8 @@ func (rpc *FeRpc) CommitTransaction(spec *base.Spec, txnId int64, commitInfos []
// 11: optional string token
// 12: optional i64 db_id
// }
-func (rpc *FeRpc) RollbackTransaction(spec *base.Spec, txnId int64) (*festruct.TRollbackTxnResult_, error) {
- log.Debugf("RollbackTransaction spec: %s, txnId: %d", spec, txnId)
+func (rpc *singleFeClient) RollbackTransaction(spec *base.Spec, txnId int64) (*festruct.TRollbackTxnResult_, error) {
+ log.Debugf("Call RollbackTransaction, addr: %s, spec: %s, txnId: %d", rpc.Address(), spec, txnId)
client := rpc.client
req := &festruct.TRollbackTxnRequest{}
@@ -132,7 +627,7 @@ func (rpc *FeRpc) RollbackTransaction(spec *base.Spec, txnId int64) (*festruct.T
req.TxnId = &txnId
if result, err := client.RollbackTxn(context.Background(), req); err != nil {
- return nil, xerror.Wrapf(err, xerror.Normal, "RollbackTransaction error: %v, req: %+v", err, req)
+ return nil, xerror.Wrapf(err, xerror.RPC, "RollbackTransaction error: %v, req: %+v", err, req)
} else {
return result, nil
}
@@ -148,8 +643,8 @@ func (rpc *FeRpc) RollbackTransaction(spec *base.Spec, txnId int64) (*festruct.T
// 7: optional string token
// 8: required i64 prev_commit_seq
// }
-func (rpc *FeRpc) GetBinlog(spec *base.Spec, commitSeq int64) (*festruct.TGetBinlogResult_, error) {
- log.Debugf("GetBinlog, spec: %s, commit seq: %d", spec, commitSeq)
+func (rpc *singleFeClient) GetBinlog(spec *base.Spec, commitSeq int64) (*festruct.TGetBinlogResult_, error) {
+ log.Debugf("Call GetBinlog, addr: %s, spec: %s, commit seq: %d", rpc.Address(), spec, commitSeq)
client := rpc.client
req := &festruct.TGetBinlogRequest{
@@ -167,14 +662,14 @@ func (rpc *FeRpc) GetBinlog(spec *base.Spec, commitSeq int64) (*festruct.TGetBin
log.Debugf("GetBinlog user %s, db %s, tableId %d, prev seq: %d", req.GetUser(), req.GetDb(),
req.GetTableId(), req.GetPrevCommitSeq())
if resp, err := client.GetBinlog(context.Background(), req); err != nil {
- return nil, xerror.Wrapf(err, xerror.Normal, "GetBinlog error: %v, req: %+v", err, req)
+ return nil, xerror.Wrapf(err, xerror.RPC, "GetBinlog error: %v, req: %+v", err, req)
} else {
return resp, nil
}
}
-func (rpc *FeRpc) GetBinlogLag(spec *base.Spec, commitSeq int64) (*festruct.TGetBinlogLagResult_, error) {
- log.Debugf("GetBinlogLag, spec: %s, commit seq: %d", spec, commitSeq)
+func (rpc *singleFeClient) GetBinlogLag(spec *base.Spec, commitSeq int64) (*festruct.TGetBinlogLagResult_, error) {
+ log.Debugf("Call GetBinlogLag, addr: %s, spec: %s, commit seq: %d", rpc.Address(), spec, commitSeq)
client := rpc.client
req := &festruct.TGetBinlogRequest{
@@ -193,7 +688,7 @@ func (rpc *FeRpc) GetBinlogLag(spec *base.Spec, commitSeq int64) (*festruct.TGet
log.Debugf("GetBinlog user %s, db %s, tableId %d, prev seq: %d", req.GetUser(), req.GetDb(),
req.GetTableId(), req.GetPrevCommitSeq())
if resp, err := client.GetBinlogLag(context.Background(), req); err != nil {
- return nil, xerror.Wrapf(err, xerror.Normal, "GetBinlogLag error: %v, req: %+v", err, req)
+ return nil, xerror.Wrapf(err, xerror.RPC, "GetBinlogLag error: %v, req: %+v", err, req)
} else {
return resp, nil
}
@@ -209,25 +704,27 @@ func (rpc *FeRpc) GetBinlogLag(spec *base.Spec, commitSeq int64) (*festruct.TGet
// 7: optional string label_name
// 8: optional string snapshot_name
// 9: optional TSnapshotType snapshot_type
+// 10: optional bool enable_compress
// }
-func (rpc *FeRpc) GetSnapshot(spec *base.Spec, labelName string) (*festruct.TGetSnapshotResult_, error) {
- log.Debugf("GetSnapshot %s, spec: %s", labelName, spec)
+func (rpc *singleFeClient) GetSnapshot(spec *base.Spec, labelName string, compress bool) (*festruct.TGetSnapshotResult_, error) {
+ log.Debugf("Call GetSnapshot, addr: %s, spec: %s, label: %s", rpc.Address(), spec, labelName)
client := rpc.client
snapshotType := festruct.TSnapshotType_LOCAL
snapshotName := ""
req := &festruct.TGetSnapshotRequest{
- Table: &spec.Table,
- LabelName: &labelName,
- SnapshotType: &snapshotType,
- SnapshotName: &snapshotName,
+ Table: &spec.Table,
+ LabelName: &labelName,
+ SnapshotType: &snapshotType,
+ SnapshotName: &snapshotName,
+ EnableCompress: &compress,
}
setAuthInfo(req, spec)
- log.Debugf("GetSnapshotRequest user %s, db %s, table %s, label name %s, snapshot name %s, snapshot type %d",
- req.GetUser(), req.GetDb(), req.GetTable(), req.GetLabelName(), req.GetSnapshotName(), req.GetSnapshotType())
+ log.Debugf("GetSnapshotRequest user %s, db %s, table %s, label name %s, snapshot name %s, snapshot type %d, enable compress %t",
+ req.GetUser(), req.GetDb(), req.GetTable(), req.GetLabelName(), req.GetSnapshotName(), req.GetSnapshotType(), req.GetEnableCompress())
if resp, err := client.GetSnapshot(context.Background(), req); err != nil {
- return nil, xerror.Wrapf(err, xerror.Normal, "GetSnapshot error: %v, req: %+v", err, req)
+ return nil, xerror.Wrapf(err, xerror.RPC, "GetSnapshot error: %v, req: %+v", err, req)
} else {
return resp, nil
}
@@ -246,39 +743,67 @@ func (rpc *FeRpc) GetSnapshot(spec *base.Spec, labelName string) (*festruct.TGet
// 10: optional map properties
// 11: optional binary meta
// 12: optional binary job_info
+// 13: optional bool clean_tables
+// 14: optional bool clean_partitions
+// 15: optional bool atomic_restore
+// 16: optional bool compressed
// }
//
// Restore Snapshot rpc
-func (rpc *FeRpc) RestoreSnapshot(spec *base.Spec, tableRefs []*festruct.TTableRef, label string, snapshotResult *festruct.TGetSnapshotResult_) (*festruct.TRestoreSnapshotResult_, error) {
- log.Debugf("RestoreSnapshot, spec: %s, snapshot result: %+v", spec, snapshotResult)
+func (rpc *singleFeClient) RestoreSnapshot(spec *base.Spec, restoreReq *RestoreSnapshotRequest) (*festruct.TRestoreSnapshotResult_, error) {
+ // NOTE: the snapshot meta is not logged here because it is too large
+ log.Debugf("Call RestoreSnapshot, addr: %s, spec: %s", rpc.Address(), spec)
client := rpc.client
repoName := "__keep_on_local__"
properties := make(map[string]string)
properties["reserve_replica"] = "true"
- // log.Infof("meta: %v", string(snapshotResult.GetMeta()))
+
+ // Support compressed snapshot
+ meta := restoreReq.SnapshotResult.GetMeta()
+ jobInfo := restoreReq.SnapshotResult.GetJobInfo()
+ if restoreReq.Compress {
+ var err error
+ meta, err = utils.GZIPCompress(meta)
+ if err != nil {
+ return nil, xerror.Wrapf(err, xerror.Normal, "gzip compress snapshot meta error: %v", err)
+ }
+ jobInfo, err = utils.GZIPCompress(jobInfo)
+ if err != nil {
+ return nil, xerror.Wrapf(err, xerror.Normal, "gzip compress snapshot job info error: %v", err)
+ }
+ }
+
req := &festruct.TRestoreSnapshotRequest{
- Table: &spec.Table,
- LabelName: &label, // TODO: check remove
- RepoName: &repoName, // TODO: check remove
- TableRefs: tableRefs,
- Properties: properties,
- Meta: snapshotResult.GetMeta(),
- JobInfo: snapshotResult.GetJobInfo(),
+ Table: &spec.Table,
+ LabelName: &restoreReq.SnapshotName,
+ RepoName: &repoName,
+ TableRefs: restoreReq.TableRefs,
+ Properties: properties,
+ Meta: meta,
+ JobInfo: jobInfo,
+ CleanTables: &restoreReq.CleanTables,
+ CleanPartitions: &restoreReq.CleanPartitions,
+ AtomicRestore: &restoreReq.AtomicRestore,
+ Compressed: utils.ThriftValueWrapper(restoreReq.Compress),
}
setAuthInfo(req, spec)
- log.Debugf("RestoreSnapshotRequest user %s, db %s, table %s, label name %s, properties %v, meta %v, job info %v",
- req.GetUser(), req.GetDb(), req.GetTable(), req.GetLabelName(), properties, snapshotResult.GetMeta(), snapshotResult.GetJobInfo())
+ // NOTE: the snapshot meta and job info are not logged because they are too large
+ log.Debugf("RestoreSnapshotRequest user %s, db %s, table %s, label name %s, properties %v, clean tables: %t, clean partitions: %t, atomic restore: %t, compressed: %t",
+ req.GetUser(), req.GetDb(), req.GetTable(), req.GetLabelName(), properties,
+ restoreReq.CleanTables, restoreReq.CleanPartitions, restoreReq.AtomicRestore,
+ req.GetCompressed())
+
if resp, err := client.RestoreSnapshot(context.Background(), req); err != nil {
- return nil, xerror.Wrapf(err, xerror.Normal, "RestoreSnapshot failed, req: %+v", req)
+ return nil, xerror.Wrapf(err, xerror.RPC, "RestoreSnapshot failed")
} else {
return resp, nil
}
}
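// Usage sketch (illustrative only): how a caller might drive the new request-object
// RestoreSnapshot API. The RestoreSnapshotRequest definition lives elsewhere in this
// change, so the field names below simply mirror the accesses above and may differ.
//
//	restoreReq := &RestoreSnapshotRequest{
//		TableRefs:       tableRefs,
//		SnapshotName:    snapshotName,
//		SnapshotResult:  snapshotResult,
//		CleanTables:     true,
//		CleanPartitions: true,
//		AtomicRestore:   true,
//		Compress:        true, // gzip the snapshot meta/job info before sending
//	}
//	result, err := rpc.RestoreSnapshot(spec, restoreReq)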
-func (rpc *FeRpc) GetMasterToken(spec *base.Spec) (string, error) {
- log.Debugf("GetMasterToken, spec: %s", spec)
+func (rpc *singleFeClient) GetMasterToken(spec *base.Spec) (*festruct.TGetMasterTokenResult_, error) {
+ log.Debugf("Call GetMasterToken, addr: %s, spec: %s", rpc.Address(), spec)
client := rpc.client
req := &festruct.TGetMasterTokenRequest{
@@ -289,8 +814,65 @@ func (rpc *FeRpc) GetMasterToken(spec *base.Spec) (string, error) {
log.Debugf("GetMasterToken user: %s", *req.User)
if resp, err := client.GetMasterToken(context.Background(), req); err != nil {
- return "", xerror.Wrapf(err, xerror.Normal, "GetMasterToken failed, req: %+v", req)
+ return nil, xerror.Wrapf(err, xerror.RPC, "GetMasterToken failed, req: %+v", req)
} else {
- return resp.GetToken(), nil
+ return resp, nil
+ }
+}
+
+func (rpc *singleFeClient) getMeta(spec *base.Spec, reqTables []*festruct.TGetMetaTable) (*festruct.TGetMetaResult_, error) {
+ client := rpc.client
+
+ reqDb := festruct.NewTGetMetaDB()
+ reqDb.Id = &spec.DbId
+ reqDb.SetTables(reqTables)
+
+ req := &festruct.TGetMetaRequest{
+ User: &spec.User,
+ Passwd: &spec.Password,
+ Db: reqDb,
+ }
+
+ if resp, err := client.GetMeta(context.Background(), req); err != nil {
+ return nil, xerror.Wrapf(err, xerror.RPC, "GetMeta failed, req: %+v", req)
+ } else {
+ return resp, nil
+ }
+}
+
+func (rpc *singleFeClient) GetDbMeta(spec *base.Spec) (*festruct.TGetMetaResult_, error) {
+ log.Debugf("GetMetaDb, addr: %s, spec: %s", rpc.Address(), spec)
+
+ return rpc.getMeta(spec, nil)
+}
+
+func (rpc *singleFeClient) GetTableMeta(spec *base.Spec, tableIds []int64) (*festruct.TGetMetaResult_, error) {
+ log.Debugf("GetMetaTable, addr: %s, tableIds: %v", rpc.Address(), tableIds)
+
+ reqTables := make([]*festruct.TGetMetaTable, 0, len(tableIds))
+ for _, tableId := range tableIds {
+ tableId := tableId // shadow the loop variable so each reqTable keeps its own pointer
+ reqTable := festruct.NewTGetMetaTable()
+ reqTable.Id = &tableId
+ reqTables = append(reqTables, reqTable)
+ }
+
+ return rpc.getMeta(spec, reqTables)
+}
+
+func (rpc *singleFeClient) GetBackends(spec *base.Spec) (*festruct.TGetBackendMetaResult_, error) {
+ log.Debugf("GetBackends, addr: %s, spec: %s", rpc.Address(), spec)
+
+ client := rpc.client
+ req := &festruct.TGetBackendMetaRequest{
+ Cluster: &spec.Cluster,
+ User: &spec.User,
+ Passwd: &spec.Password,
+ }
+
+ if resp, err := client.GetBackendMeta(context.Background(), req); err != nil {
+ return nil, xerror.Wrapf(err, xerror.RPC, "GetBackendMeta failed, req: %+v", req)
+ } else {
+ return resp, nil
}
}
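// Hedged sketch of a gzip helper like the utils.GZIPCompress used above to shrink the
// snapshot meta and job info when compression is enabled. The real helper in pkg/utils
// may differ in name and signature; this only illustrates the idea.

package utils

import (
	"bytes"
	"compress/gzip"
)

// GZIPCompress returns the gzip-compressed form of data.
func GZIPCompress(data []byte) ([]byte, error) {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	if _, err := w.Write(data); err != nil {
		w.Close()
		return nil, err
	}
	// Close flushes the remaining bytes and writes the gzip footer.
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}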
diff --git a/pkg/rpc/kitex_gen/agentservice/AgentService.go b/pkg/rpc/kitex_gen/agentservice/AgentService.go
index 2dfc8dc2..884aea93 100644
--- a/pkg/rpc/kitex_gen/agentservice/AgentService.go
+++ b/pkg/rpc/kitex_gen/agentservice/AgentService.go
@@ -1,4 +1,4 @@
-// Code generated by thriftgo (0.2.7). DO NOT EDIT.
+// Code generated by thriftgo (0.3.13). DO NOT EDIT.
package agentservice
@@ -105,6 +105,78 @@ func (p *TTabletType) Value() (driver.Value, error) {
return int64(*p), nil
}
+type TObjStorageType int64
+
+const (
+ TObjStorageType_UNKNOWN TObjStorageType = 0
+ TObjStorageType_AWS TObjStorageType = 1
+ TObjStorageType_AZURE TObjStorageType = 2
+ TObjStorageType_BOS TObjStorageType = 3
+ TObjStorageType_COS TObjStorageType = 4
+ TObjStorageType_OBS TObjStorageType = 5
+ TObjStorageType_OSS TObjStorageType = 6
+ TObjStorageType_GCP TObjStorageType = 7
+)
+
+func (p TObjStorageType) String() string {
+ switch p {
+ case TObjStorageType_UNKNOWN:
+ return "UNKNOWN"
+ case TObjStorageType_AWS:
+ return "AWS"
+ case TObjStorageType_AZURE:
+ return "AZURE"
+ case TObjStorageType_BOS:
+ return "BOS"
+ case TObjStorageType_COS:
+ return "COS"
+ case TObjStorageType_OBS:
+ return "OBS"
+ case TObjStorageType_OSS:
+ return "OSS"
+ case TObjStorageType_GCP:
+ return "GCP"
+ }
+ return ""
+}
+
+func TObjStorageTypeFromString(s string) (TObjStorageType, error) {
+ switch s {
+ case "UNKNOWN":
+ return TObjStorageType_UNKNOWN, nil
+ case "AWS":
+ return TObjStorageType_AWS, nil
+ case "AZURE":
+ return TObjStorageType_AZURE, nil
+ case "BOS":
+ return TObjStorageType_BOS, nil
+ case "COS":
+ return TObjStorageType_COS, nil
+ case "OBS":
+ return TObjStorageType_OBS, nil
+ case "OSS":
+ return TObjStorageType_OSS, nil
+ case "GCP":
+ return TObjStorageType_GCP, nil
+ }
+ return TObjStorageType(0), fmt.Errorf("not a valid TObjStorageType string")
+}
+
+func TObjStorageTypePtr(v TObjStorageType) *TObjStorageType { return &v }
+func (p *TObjStorageType) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = TObjStorageType(result.Int64)
+ return
+}
+
+func (p *TObjStorageType) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
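// Illustrative round-trip through the newly generated TObjStorageType helpers (this
// example function is not part of the generated file); String and
// TObjStorageTypeFromString are exactly the functions defined above.
func exampleObjStorageTypeRoundTrip() error {
	provider := TObjStorageType_OSS
	parsed, err := TObjStorageTypeFromString(provider.String())
	if err != nil {
		return err
	}
	if parsed != provider {
		return fmt.Errorf("unexpected round-trip result: %v", parsed)
	}
	return nil
}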
type TCompressionType int64
const (
@@ -182,6 +254,55 @@ func (p *TCompressionType) Value() (driver.Value, error) {
return int64(*p), nil
}
+type TInvertedIndexStorageFormat int64
+
+const (
+ TInvertedIndexStorageFormat_DEFAULT TInvertedIndexStorageFormat = 0
+ TInvertedIndexStorageFormat_V1 TInvertedIndexStorageFormat = 1
+ TInvertedIndexStorageFormat_V2 TInvertedIndexStorageFormat = 2
+)
+
+func (p TInvertedIndexStorageFormat) String() string {
+ switch p {
+ case TInvertedIndexStorageFormat_DEFAULT:
+ return "DEFAULT"
+ case TInvertedIndexStorageFormat_V1:
+ return "V1"
+ case TInvertedIndexStorageFormat_V2:
+ return "V2"
+ }
+ return ""
+}
+
+func TInvertedIndexStorageFormatFromString(s string) (TInvertedIndexStorageFormat, error) {
+ switch s {
+ case "DEFAULT":
+ return TInvertedIndexStorageFormat_DEFAULT, nil
+ case "V1":
+ return TInvertedIndexStorageFormat_V1, nil
+ case "V2":
+ return TInvertedIndexStorageFormat_V2, nil
+ }
+ return TInvertedIndexStorageFormat(0), fmt.Errorf("not a valid TInvertedIndexStorageFormat string")
+}
+
+func TInvertedIndexStorageFormatPtr(v TInvertedIndexStorageFormat) *TInvertedIndexStorageFormat {
+ return &v
+}
+func (p *TInvertedIndexStorageFormat) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = TInvertedIndexStorageFormat(result.Int64)
+ return
+}
+
+func (p *TInvertedIndexStorageFormat) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
type TAlterTabletType int64
const (
@@ -369,6 +490,11 @@ type TTabletSchema struct {
StoreRowColumn bool `thrift:"store_row_column,16,optional" frugal:"16,optional,bool" json:"store_row_column,omitempty"`
EnableSingleReplicaCompaction bool `thrift:"enable_single_replica_compaction,17,optional" frugal:"17,optional,bool" json:"enable_single_replica_compaction,omitempty"`
SkipWriteIndexOnLoad bool `thrift:"skip_write_index_on_load,18,optional" frugal:"18,optional,bool" json:"skip_write_index_on_load,omitempty"`
+ ClusterKeyIdxes []int32 `thrift:"cluster_key_idxes,19,optional" frugal:"19,optional,list" json:"cluster_key_idxes,omitempty"`
+ RowStoreColCids []int32 `thrift:"row_store_col_cids,20,optional" frugal:"20,optional,list" json:"row_store_col_cids,omitempty"`
+ RowStorePageSize int64 `thrift:"row_store_page_size,21,optional" frugal:"21,optional,i64" json:"row_store_page_size,omitempty"`
+ VariantEnableFlattenNested bool `thrift:"variant_enable_flatten_nested,22,optional" frugal:"22,optional,bool" json:"variant_enable_flatten_nested,omitempty"`
+ StoragePageSize int64 `thrift:"storage_page_size,23,optional" frugal:"23,optional,i64" json:"storage_page_size,omitempty"`
}
func NewTTabletSchema() *TTabletSchema {
@@ -381,20 +507,23 @@ func NewTTabletSchema() *TTabletSchema {
StoreRowColumn: false,
EnableSingleReplicaCompaction: false,
SkipWriteIndexOnLoad: false,
+ RowStorePageSize: 16384,
+ VariantEnableFlattenNested: false,
+ StoragePageSize: 65536,
}
}
func (p *TTabletSchema) InitDefault() {
- *p = TTabletSchema{
-
- DeleteSignIdx: -1,
- SequenceColIdx: -1,
- VersionColIdx: -1,
- IsDynamicSchema: false,
- StoreRowColumn: false,
- EnableSingleReplicaCompaction: false,
- SkipWriteIndexOnLoad: false,
- }
+ p.DeleteSignIdx = -1
+ p.SequenceColIdx = -1
+ p.VersionColIdx = -1
+ p.IsDynamicSchema = false
+ p.StoreRowColumn = false
+ p.EnableSingleReplicaCompaction = false
+ p.SkipWriteIndexOnLoad = false
+ p.RowStorePageSize = 16384
+ p.VariantEnableFlattenNested = false
+ p.StoragePageSize = 65536
}
func (p *TTabletSchema) GetShortKeyColumnCount() (v int16) {
@@ -533,6 +662,51 @@ func (p *TTabletSchema) GetSkipWriteIndexOnLoad() (v bool) {
}
return p.SkipWriteIndexOnLoad
}
+
+var TTabletSchema_ClusterKeyIdxes_DEFAULT []int32
+
+func (p *TTabletSchema) GetClusterKeyIdxes() (v []int32) {
+ if !p.IsSetClusterKeyIdxes() {
+ return TTabletSchema_ClusterKeyIdxes_DEFAULT
+ }
+ return p.ClusterKeyIdxes
+}
+
+var TTabletSchema_RowStoreColCids_DEFAULT []int32
+
+func (p *TTabletSchema) GetRowStoreColCids() (v []int32) {
+ if !p.IsSetRowStoreColCids() {
+ return TTabletSchema_RowStoreColCids_DEFAULT
+ }
+ return p.RowStoreColCids
+}
+
+var TTabletSchema_RowStorePageSize_DEFAULT int64 = 16384
+
+func (p *TTabletSchema) GetRowStorePageSize() (v int64) {
+ if !p.IsSetRowStorePageSize() {
+ return TTabletSchema_RowStorePageSize_DEFAULT
+ }
+ return p.RowStorePageSize
+}
+
+var TTabletSchema_VariantEnableFlattenNested_DEFAULT bool = false
+
+func (p *TTabletSchema) GetVariantEnableFlattenNested() (v bool) {
+ if !p.IsSetVariantEnableFlattenNested() {
+ return TTabletSchema_VariantEnableFlattenNested_DEFAULT
+ }
+ return p.VariantEnableFlattenNested
+}
+
+var TTabletSchema_StoragePageSize_DEFAULT int64 = 65536
+
+func (p *TTabletSchema) GetStoragePageSize() (v int64) {
+ if !p.IsSetStoragePageSize() {
+ return TTabletSchema_StoragePageSize_DEFAULT
+ }
+ return p.StoragePageSize
+}
func (p *TTabletSchema) SetShortKeyColumnCount(val int16) {
p.ShortKeyColumnCount = val
}
@@ -587,6 +761,21 @@ func (p *TTabletSchema) SetEnableSingleReplicaCompaction(val bool) {
func (p *TTabletSchema) SetSkipWriteIndexOnLoad(val bool) {
p.SkipWriteIndexOnLoad = val
}
+func (p *TTabletSchema) SetClusterKeyIdxes(val []int32) {
+ p.ClusterKeyIdxes = val
+}
+func (p *TTabletSchema) SetRowStoreColCids(val []int32) {
+ p.RowStoreColCids = val
+}
+func (p *TTabletSchema) SetRowStorePageSize(val int64) {
+ p.RowStorePageSize = val
+}
+func (p *TTabletSchema) SetVariantEnableFlattenNested(val bool) {
+ p.VariantEnableFlattenNested = val
+}
+func (p *TTabletSchema) SetStoragePageSize(val int64) {
+ p.StoragePageSize = val
+}
var fieldIDToName_TTabletSchema = map[int16]string{
1: "short_key_column_count",
@@ -607,6 +796,11 @@ var fieldIDToName_TTabletSchema = map[int16]string{
16: "store_row_column",
17: "enable_single_replica_compaction",
18: "skip_write_index_on_load",
+ 19: "cluster_key_idxes",
+ 20: "row_store_col_cids",
+ 21: "row_store_page_size",
+ 22: "variant_enable_flatten_nested",
+ 23: "storage_page_size",
}
func (p *TTabletSchema) IsSetBloomFilterFpp() bool {
@@ -661,6 +855,26 @@ func (p *TTabletSchema) IsSetSkipWriteIndexOnLoad() bool {
return p.SkipWriteIndexOnLoad != TTabletSchema_SkipWriteIndexOnLoad_DEFAULT
}
+func (p *TTabletSchema) IsSetClusterKeyIdxes() bool {
+ return p.ClusterKeyIdxes != nil
+}
+
+func (p *TTabletSchema) IsSetRowStoreColCids() bool {
+ return p.RowStoreColCids != nil
+}
+
+func (p *TTabletSchema) IsSetRowStorePageSize() bool {
+ return p.RowStorePageSize != TTabletSchema_RowStorePageSize_DEFAULT
+}
+
+func (p *TTabletSchema) IsSetVariantEnableFlattenNested() bool {
+ return p.VariantEnableFlattenNested != TTabletSchema_VariantEnableFlattenNested_DEFAULT
+}
+
+func (p *TTabletSchema) IsSetStoragePageSize() bool {
+ return p.StoragePageSize != TTabletSchema_StoragePageSize_DEFAULT
+}
+
func (p *TTabletSchema) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -691,10 +905,8 @@ func (p *TTabletSchema) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetShortKeyColumnCount = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -702,10 +914,8 @@ func (p *TTabletSchema) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSchemaHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I32 {
@@ -713,10 +923,8 @@ func (p *TTabletSchema) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetKeysType = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I32 {
@@ -724,10 +932,8 @@ func (p *TTabletSchema) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetStorageType = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.LIST {
@@ -735,147 +941,158 @@ func (p *TTabletSchema) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetColumns = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.DOUBLE {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.LIST {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField8(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.I32 {
if err = p.ReadField9(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.I32 {
if err = p.ReadField10(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 11:
if fieldTypeId == thrift.I32 {
if err = p.ReadField11(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 12:
if fieldTypeId == thrift.I32 {
if err = p.ReadField12(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 13:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField13(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 14:
if fieldTypeId == thrift.I32 {
if err = p.ReadField14(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 15:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField15(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 16:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField16(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 17:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField17(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 18:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField18(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 19:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField19(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 20:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField20(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 21:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField21(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 22:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField22(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 23:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField23(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -927,187 +1144,305 @@ RequiredFieldNotSetError:
}
func (p *TTabletSchema) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field int16
if v, err := iprot.ReadI16(); err != nil {
return err
} else {
- p.ShortKeyColumnCount = v
+ _field = v
}
+ p.ShortKeyColumnCount = _field
return nil
}
-
func (p *TTabletSchema) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SchemaHash = v
+ _field = v
}
+ p.SchemaHash = _field
return nil
}
-
func (p *TTabletSchema) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field types.TKeysType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.KeysType = types.TKeysType(v)
+ _field = types.TKeysType(v)
}
+ p.KeysType = _field
return nil
}
-
func (p *TTabletSchema) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field types.TStorageType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.StorageType = types.TStorageType(v)
+ _field = types.TStorageType(v)
}
+ p.StorageType = _field
return nil
}
-
func (p *TTabletSchema) ReadField5(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.Columns = make([]*descriptors.TColumn, 0, size)
+ _field := make([]*descriptors.TColumn, 0, size)
+ values := make([]descriptors.TColumn, size)
for i := 0; i < size; i++ {
- _elem := descriptors.NewTColumn()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.Columns = append(p.Columns, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Columns = _field
return nil
}
-
func (p *TTabletSchema) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *float64
if v, err := iprot.ReadDouble(); err != nil {
return err
} else {
- p.BloomFilterFpp = &v
+ _field = &v
}
+ p.BloomFilterFpp = _field
return nil
}
-
func (p *TTabletSchema) ReadField7(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.Indexes = make([]*descriptors.TOlapTableIndex, 0, size)
+ _field := make([]*descriptors.TOlapTableIndex, 0, size)
+ values := make([]descriptors.TOlapTableIndex, size)
for i := 0; i < size; i++ {
- _elem := descriptors.NewTOlapTableIndex()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.Indexes = append(p.Indexes, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Indexes = _field
return nil
}
-
func (p *TTabletSchema) ReadField8(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.IsInMemory = &v
+ _field = &v
}
+ p.IsInMemory = _field
return nil
}
-
func (p *TTabletSchema) ReadField9(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.DeleteSignIdx = v
+ _field = v
}
+ p.DeleteSignIdx = _field
return nil
}
-
func (p *TTabletSchema) ReadField10(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SequenceColIdx = v
+ _field = v
}
+ p.SequenceColIdx = _field
return nil
}
-
func (p *TTabletSchema) ReadField11(iprot thrift.TProtocol) error {
+
+ var _field *types.TSortType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := types.TSortType(v)
- p.SortType = &tmp
+ _field = &tmp
}
+ p.SortType = _field
return nil
}
-
func (p *TTabletSchema) ReadField12(iprot thrift.TProtocol) error {
+
+ var _field *int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SortColNum = &v
+ _field = &v
}
+ p.SortColNum = _field
return nil
}
-
func (p *TTabletSchema) ReadField13(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.DisableAutoCompaction = &v
+ _field = &v
}
+ p.DisableAutoCompaction = _field
return nil
}
-
func (p *TTabletSchema) ReadField14(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.VersionColIdx = v
+ _field = v
}
+ p.VersionColIdx = _field
return nil
}
-
func (p *TTabletSchema) ReadField15(iprot thrift.TProtocol) error {
+
+ var _field bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.IsDynamicSchema = v
+ _field = v
}
+ p.IsDynamicSchema = _field
return nil
}
-
func (p *TTabletSchema) ReadField16(iprot thrift.TProtocol) error {
+
+ var _field bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.StoreRowColumn = v
+ _field = v
}
+ p.StoreRowColumn = _field
return nil
}
-
func (p *TTabletSchema) ReadField17(iprot thrift.TProtocol) error {
+
+ var _field bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.EnableSingleReplicaCompaction = v
+ _field = v
}
+ p.EnableSingleReplicaCompaction = _field
return nil
}
-
func (p *TTabletSchema) ReadField18(iprot thrift.TProtocol) error {
+
+ var _field bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.SkipWriteIndexOnLoad = _field
+ return nil
+}
+func (p *TTabletSchema) ReadField19(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]int32, 0, size)
+ for i := 0; i < size; i++ {
+
+ var _elem int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.ClusterKeyIdxes = _field
+ return nil
+}
+func (p *TTabletSchema) ReadField20(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]int32, 0, size)
+ for i := 0; i < size; i++ {
+
+ var _elem int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.RowStoreColCids = _field
+ return nil
+}
+func (p *TTabletSchema) ReadField21(iprot thrift.TProtocol) error {
+
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.RowStorePageSize = _field
+ return nil
+}
+func (p *TTabletSchema) ReadField22(iprot thrift.TProtocol) error {
+
+ var _field bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.SkipWriteIndexOnLoad = v
+ _field = v
+ }
+ p.VariantEnableFlattenNested = _field
+ return nil
+}
+func (p *TTabletSchema) ReadField23(iprot thrift.TProtocol) error {
+
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
}
+ p.StoragePageSize = _field
return nil
}
@@ -1189,7 +1524,26 @@ func (p *TTabletSchema) Write(oprot thrift.TProtocol) (err error) {
fieldId = 18
goto WriteFieldError
}
-
+ if err = p.writeField19(oprot); err != nil {
+ fieldId = 19
+ goto WriteFieldError
+ }
+ if err = p.writeField20(oprot); err != nil {
+ fieldId = 20
+ goto WriteFieldError
+ }
+ if err = p.writeField21(oprot); err != nil {
+ fieldId = 21
+ goto WriteFieldError
+ }
+ if err = p.writeField22(oprot); err != nil {
+ fieldId = 22
+ goto WriteFieldError
+ }
+ if err = p.writeField23(oprot); err != nil {
+ fieldId = 23
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -1556,11 +1910,123 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err)
}
+func (p *TTabletSchema) writeField19(oprot thrift.TProtocol) (err error) {
+ if p.IsSetClusterKeyIdxes() {
+ if err = oprot.WriteFieldBegin("cluster_key_idxes", thrift.LIST, 19); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.I32, len(p.ClusterKeyIdxes)); err != nil {
+ return err
+ }
+ for _, v := range p.ClusterKeyIdxes {
+ if err := oprot.WriteI32(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err)
+}
+
+func (p *TTabletSchema) writeField20(oprot thrift.TProtocol) (err error) {
+ if p.IsSetRowStoreColCids() {
+ if err = oprot.WriteFieldBegin("row_store_col_cids", thrift.LIST, 20); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.I32, len(p.RowStoreColCids)); err != nil {
+ return err
+ }
+ for _, v := range p.RowStoreColCids {
+ if err := oprot.WriteI32(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err)
+}
+
+func (p *TTabletSchema) writeField21(oprot thrift.TProtocol) (err error) {
+ if p.IsSetRowStorePageSize() {
+ if err = oprot.WriteFieldBegin("row_store_page_size", thrift.I64, 21); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.RowStorePageSize); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err)
+}
+
+func (p *TTabletSchema) writeField22(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVariantEnableFlattenNested() {
+ if err = oprot.WriteFieldBegin("variant_enable_flatten_nested", thrift.BOOL, 22); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(p.VariantEnableFlattenNested); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err)
+}
+
+func (p *TTabletSchema) writeField23(oprot thrift.TProtocol) (err error) {
+ if p.IsSetStoragePageSize() {
+ if err = oprot.WriteFieldBegin("storage_page_size", thrift.I64, 23); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.StoragePageSize); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err)
+}
+
func (p *TTabletSchema) String() string {
if p == nil {
return ""
}
return fmt.Sprintf("TTabletSchema(%+v)", *p)
+
}
func (p *TTabletSchema) DeepEqual(ano *TTabletSchema) bool {
@@ -1623,6 +2089,21 @@ func (p *TTabletSchema) DeepEqual(ano *TTabletSchema) bool {
if !p.Field18DeepEqual(ano.SkipWriteIndexOnLoad) {
return false
}
+ if !p.Field19DeepEqual(ano.ClusterKeyIdxes) {
+ return false
+ }
+ if !p.Field20DeepEqual(ano.RowStoreColCids) {
+ return false
+ }
+ if !p.Field21DeepEqual(ano.RowStorePageSize) {
+ return false
+ }
+ if !p.Field22DeepEqual(ano.VariantEnableFlattenNested) {
+ return false
+ }
+ if !p.Field23DeepEqual(ano.StoragePageSize) {
+ return false
+ }
return true
}
@@ -1789,18 +2270,67 @@ func (p *TTabletSchema) Field18DeepEqual(src bool) bool {
}
return true
}
+func (p *TTabletSchema) Field19DeepEqual(src []int32) bool {
+
+ if len(p.ClusterKeyIdxes) != len(src) {
+ return false
+ }
+ for i, v := range p.ClusterKeyIdxes {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
+ }
+ return true
+}
+func (p *TTabletSchema) Field20DeepEqual(src []int32) bool {
+
+ if len(p.RowStoreColCids) != len(src) {
+ return false
+ }
+ for i, v := range p.RowStoreColCids {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
+ }
+ return true
+}
+func (p *TTabletSchema) Field21DeepEqual(src int64) bool {
+
+ if p.RowStorePageSize != src {
+ return false
+ }
+ return true
+}
+func (p *TTabletSchema) Field22DeepEqual(src bool) bool {
+
+ if p.VariantEnableFlattenNested != src {
+ return false
+ }
+ return true
+}
+func (p *TTabletSchema) Field23DeepEqual(src int64) bool {
+
+ if p.StoragePageSize != src {
+ return false
+ }
+ return true
+}
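// Illustrative sketch (not generated code): exercising the new optional TTabletSchema
// fields through the setters added above.
func exampleTabletSchemaNewFields() *TTabletSchema {
	schema := NewTTabletSchema()
	schema.SetClusterKeyIdxes([]int32{0, 2})
	schema.SetRowStorePageSize(32 * 1024) // differs from the 16384 default, so IsSetRowStorePageSize() reports true
	schema.SetVariantEnableFlattenNested(true)
	return schema
}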
type TS3StorageParam struct {
- Endpoint *string `thrift:"endpoint,1,optional" frugal:"1,optional,string" json:"endpoint,omitempty"`
- Region *string `thrift:"region,2,optional" frugal:"2,optional,string" json:"region,omitempty"`
- Ak *string `thrift:"ak,3,optional" frugal:"3,optional,string" json:"ak,omitempty"`
- Sk *string `thrift:"sk,4,optional" frugal:"4,optional,string" json:"sk,omitempty"`
- MaxConn int32 `thrift:"max_conn,5,optional" frugal:"5,optional,i32" json:"max_conn,omitempty"`
- RequestTimeoutMs int32 `thrift:"request_timeout_ms,6,optional" frugal:"6,optional,i32" json:"request_timeout_ms,omitempty"`
- ConnTimeoutMs int32 `thrift:"conn_timeout_ms,7,optional" frugal:"7,optional,i32" json:"conn_timeout_ms,omitempty"`
- RootPath *string `thrift:"root_path,8,optional" frugal:"8,optional,string" json:"root_path,omitempty"`
- Bucket *string `thrift:"bucket,9,optional" frugal:"9,optional,string" json:"bucket,omitempty"`
- UsePathStyle bool `thrift:"use_path_style,10,optional" frugal:"10,optional,bool" json:"use_path_style,omitempty"`
+ Endpoint *string `thrift:"endpoint,1,optional" frugal:"1,optional,string" json:"endpoint,omitempty"`
+ Region *string `thrift:"region,2,optional" frugal:"2,optional,string" json:"region,omitempty"`
+ Ak *string `thrift:"ak,3,optional" frugal:"3,optional,string" json:"ak,omitempty"`
+ Sk *string `thrift:"sk,4,optional" frugal:"4,optional,string" json:"sk,omitempty"`
+ MaxConn int32 `thrift:"max_conn,5,optional" frugal:"5,optional,i32" json:"max_conn,omitempty"`
+ RequestTimeoutMs int32 `thrift:"request_timeout_ms,6,optional" frugal:"6,optional,i32" json:"request_timeout_ms,omitempty"`
+ ConnTimeoutMs int32 `thrift:"conn_timeout_ms,7,optional" frugal:"7,optional,i32" json:"conn_timeout_ms,omitempty"`
+ RootPath *string `thrift:"root_path,8,optional" frugal:"8,optional,string" json:"root_path,omitempty"`
+ Bucket *string `thrift:"bucket,9,optional" frugal:"9,optional,string" json:"bucket,omitempty"`
+ UsePathStyle bool `thrift:"use_path_style,10,optional" frugal:"10,optional,bool" json:"use_path_style,omitempty"`
+ Token *string `thrift:"token,11,optional" frugal:"11,optional,string" json:"token,omitempty"`
+ Provider *TObjStorageType `thrift:"provider,12,optional" frugal:"12,optional,TObjStorageType" json:"provider,omitempty"`
}
func NewTS3StorageParam() *TS3StorageParam {
@@ -1814,13 +2344,10 @@ func NewTS3StorageParam() *TS3StorageParam {
}
func (p *TS3StorageParam) InitDefault() {
- *p = TS3StorageParam{
-
- MaxConn: 50,
- RequestTimeoutMs: 3000,
- ConnTimeoutMs: 1000,
- UsePathStyle: false,
- }
+ p.MaxConn = 50
+ p.RequestTimeoutMs = 3000
+ p.ConnTimeoutMs = 1000
+ p.UsePathStyle = false
}
var TS3StorageParam_Endpoint_DEFAULT string
@@ -1912,6 +2439,24 @@ func (p *TS3StorageParam) GetUsePathStyle() (v bool) {
}
return p.UsePathStyle
}
+
+var TS3StorageParam_Token_DEFAULT string
+
+func (p *TS3StorageParam) GetToken() (v string) {
+ if !p.IsSetToken() {
+ return TS3StorageParam_Token_DEFAULT
+ }
+ return *p.Token
+}
+
+var TS3StorageParam_Provider_DEFAULT TObjStorageType
+
+func (p *TS3StorageParam) GetProvider() (v TObjStorageType) {
+ if !p.IsSetProvider() {
+ return TS3StorageParam_Provider_DEFAULT
+ }
+ return *p.Provider
+}
func (p *TS3StorageParam) SetEndpoint(val *string) {
p.Endpoint = val
}
@@ -1942,6 +2487,12 @@ func (p *TS3StorageParam) SetBucket(val *string) {
func (p *TS3StorageParam) SetUsePathStyle(val bool) {
p.UsePathStyle = val
}
+func (p *TS3StorageParam) SetToken(val *string) {
+ p.Token = val
+}
+func (p *TS3StorageParam) SetProvider(val *TObjStorageType) {
+ p.Provider = val
+}
var fieldIDToName_TS3StorageParam = map[int16]string{
1: "endpoint",
@@ -1954,6 +2505,8 @@ var fieldIDToName_TS3StorageParam = map[int16]string{
8: "root_path",
9: "bucket",
10: "use_path_style",
+ 11: "token",
+ 12: "provider",
}
func (p *TS3StorageParam) IsSetEndpoint() bool {
@@ -1996,6 +2549,14 @@ func (p *TS3StorageParam) IsSetUsePathStyle() bool {
return p.UsePathStyle != TS3StorageParam_UsePathStyle_DEFAULT
}
+func (p *TS3StorageParam) IsSetToken() bool {
+ return p.Token != nil
+}
+
+func (p *TS3StorageParam) IsSetProvider() bool {
+ return p.Provider != nil
+}
+
func (p *TS3StorageParam) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -2020,107 +2581,102 @@ func (p *TS3StorageParam) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRING {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRING {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.STRING {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I32 {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.I32 {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.I32 {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.STRING {
if err = p.ReadField8(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.STRING {
if err = p.ReadField9(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField10(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 11:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField11(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 12:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField12(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -2146,92 +2702,136 @@ ReadStructEndError:
}
func (p *TS3StorageParam) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Endpoint = &v
+ _field = &v
}
+ p.Endpoint = _field
return nil
}
-
func (p *TS3StorageParam) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Region = &v
+ _field = &v
}
+ p.Region = _field
return nil
}
-
func (p *TS3StorageParam) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Ak = &v
+ _field = &v
}
+ p.Ak = _field
return nil
}
-
func (p *TS3StorageParam) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Sk = &v
+ _field = &v
}
+ p.Sk = _field
return nil
}
-
func (p *TS3StorageParam) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.MaxConn = v
+ _field = v
}
+ p.MaxConn = _field
return nil
}
-
func (p *TS3StorageParam) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.RequestTimeoutMs = v
+ _field = v
}
+ p.RequestTimeoutMs = _field
return nil
}
-
func (p *TS3StorageParam) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.ConnTimeoutMs = v
+ _field = v
}
+ p.ConnTimeoutMs = _field
return nil
}
-
func (p *TS3StorageParam) ReadField8(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.RootPath = &v
+ _field = &v
}
+ p.RootPath = _field
return nil
}
-
func (p *TS3StorageParam) ReadField9(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Bucket = &v
+ _field = &v
}
+ p.Bucket = _field
return nil
}
-
func (p *TS3StorageParam) ReadField10(iprot thrift.TProtocol) error {
+
+ var _field bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.UsePathStyle = v
+ _field = v
+ }
+ p.UsePathStyle = _field
+ return nil
+}
+func (p *TS3StorageParam) ReadField11(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Token = _field
+ return nil
+}
+func (p *TS3StorageParam) ReadField12(iprot thrift.TProtocol) error {
+
+ var _field *TObjStorageType
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ tmp := TObjStorageType(v)
+ _field = &tmp
}
+ p.Provider = _field
return nil
}
@@ -2281,7 +2881,14 @@ func (p *TS3StorageParam) Write(oprot thrift.TProtocol) (err error) {
fieldId = 10
goto WriteFieldError
}
-
+ if err = p.writeField11(oprot); err != nil {
+ fieldId = 11
+ goto WriteFieldError
+ }
+ if err = p.writeField12(oprot); err != nil {
+ fieldId = 12
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -2490,13 +3097,52 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err)
}
-func (p *TS3StorageParam) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("TS3StorageParam(%+v)", *p)
-}
-
+func (p *TS3StorageParam) writeField11(oprot thrift.TProtocol) (err error) {
+ if p.IsSetToken() {
+ if err = oprot.WriteFieldBegin("token", thrift.STRING, 11); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.Token); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err)
+}
+
+func (p *TS3StorageParam) writeField12(oprot thrift.TProtocol) (err error) {
+ if p.IsSetProvider() {
+ if err = oprot.WriteFieldBegin("provider", thrift.I32, 12); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(*p.Provider)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err)
+}
+
+func (p *TS3StorageParam) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TS3StorageParam(%+v)", *p)
+
+}
+
func (p *TS3StorageParam) DeepEqual(ano *TS3StorageParam) bool {
if p == ano {
return true
@@ -2533,6 +3179,12 @@ func (p *TS3StorageParam) DeepEqual(ano *TS3StorageParam) bool {
if !p.Field10DeepEqual(ano.UsePathStyle) {
return false
}
+ if !p.Field11DeepEqual(ano.Token) {
+ return false
+ }
+ if !p.Field12DeepEqual(ano.Provider) {
+ return false
+ }
return true
}
@@ -2636,6 +3288,30 @@ func (p *TS3StorageParam) Field10DeepEqual(src bool) bool {
}
return true
}
+func (p *TS3StorageParam) Field11DeepEqual(src *string) bool {
+
+ if p.Token == src {
+ return true
+ } else if p.Token == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.Token, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *TS3StorageParam) Field12DeepEqual(src *TObjStorageType) bool {
+
+ if p.Provider == src {
+ return true
+ } else if p.Provider == nil || src == nil {
+ return false
+ }
+ if *p.Provider != *src {
+ return false
+ }
+ return true
+}
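// Illustrative sketch (not generated code): populating the new optional token and
// provider fields on TS3StorageParam via the generated setters; the token value is
// hypothetical.
func exampleS3ParamWithProvider() *TS3StorageParam {
	s3 := NewTS3StorageParam()
	token := "temporary-session-token"
	s3.SetToken(&token)
	s3.SetProvider(TObjStorageTypePtr(TObjStorageType_COS))
	return s3
}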
type TStoragePolicy struct {
Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"`
@@ -2651,7 +3327,6 @@ func NewTStoragePolicy() *TStoragePolicy {
}
func (p *TStoragePolicy) InitDefault() {
- *p = TStoragePolicy{}
}
var TStoragePolicy_Id_DEFAULT int64
@@ -2783,67 +3458,54 @@ func (p *TStoragePolicy) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRING {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I64 {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I64 {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.I64 {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -2869,56 +3531,69 @@ ReadStructEndError:
}
func (p *TStoragePolicy) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.Id = &v
+ _field = &v
}
+ p.Id = _field
return nil
}
-
func (p *TStoragePolicy) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Name = &v
+ _field = &v
}
+ p.Name = _field
return nil
}
-
func (p *TStoragePolicy) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.Version = &v
+ _field = &v
}
+ p.Version = _field
return nil
}
-
func (p *TStoragePolicy) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.CooldownDatetime = &v
+ _field = &v
}
+ p.CooldownDatetime = _field
return nil
}
-
func (p *TStoragePolicy) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.CooldownTtl = &v
+ _field = &v
}
+ p.CooldownTtl = _field
return nil
}
-
func (p *TStoragePolicy) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.ResourceId = &v
+ _field = &v
}
+ p.ResourceId = _field
return nil
}
@@ -2952,7 +3627,6 @@ func (p *TStoragePolicy) Write(oprot thrift.TProtocol) (err error) {
fieldId = 6
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -3090,6 +3764,7 @@ func (p *TStoragePolicy) String() string {
return ""
}
return fmt.Sprintf("TStoragePolicy(%+v)", *p)
+
}
func (p *TStoragePolicy) DeepEqual(ano *TStoragePolicy) bool {
@@ -3193,10 +3868,11 @@ func (p *TStoragePolicy) Field6DeepEqual(src *int64) bool {
}
type TStorageResource struct {
- Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"`
- Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"`
- Version *int64 `thrift:"version,3,optional" frugal:"3,optional,i64" json:"version,omitempty"`
- S3StorageParam *TS3StorageParam `thrift:"s3_storage_param,4,optional" frugal:"4,optional,TS3StorageParam" json:"s3_storage_param,omitempty"`
+ Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"`
+ Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"`
+ Version *int64 `thrift:"version,3,optional" frugal:"3,optional,i64" json:"version,omitempty"`
+ S3StorageParam *TS3StorageParam `thrift:"s3_storage_param,4,optional" frugal:"4,optional,TS3StorageParam" json:"s3_storage_param,omitempty"`
+ HdfsStorageParam *plannodes.THdfsParams `thrift:"hdfs_storage_param,5,optional" frugal:"5,optional,plannodes.THdfsParams" json:"hdfs_storage_param,omitempty"`
}
func NewTStorageResource() *TStorageResource {
@@ -3204,7 +3880,6 @@ func NewTStorageResource() *TStorageResource {
}
func (p *TStorageResource) InitDefault() {
- *p = TStorageResource{}
}
var TStorageResource_Id_DEFAULT int64
@@ -3242,6 +3917,15 @@ func (p *TStorageResource) GetS3StorageParam() (v *TS3StorageParam) {
}
return p.S3StorageParam
}
+
+var TStorageResource_HdfsStorageParam_DEFAULT *plannodes.THdfsParams
+
+func (p *TStorageResource) GetHdfsStorageParam() (v *plannodes.THdfsParams) {
+ if !p.IsSetHdfsStorageParam() {
+ return TStorageResource_HdfsStorageParam_DEFAULT
+ }
+ return p.HdfsStorageParam
+}
func (p *TStorageResource) SetId(val *int64) {
p.Id = val
}
@@ -3254,12 +3938,16 @@ func (p *TStorageResource) SetVersion(val *int64) {
func (p *TStorageResource) SetS3StorageParam(val *TS3StorageParam) {
p.S3StorageParam = val
}
+func (p *TStorageResource) SetHdfsStorageParam(val *plannodes.THdfsParams) {
+ p.HdfsStorageParam = val
+}
var fieldIDToName_TStorageResource = map[int16]string{
1: "id",
2: "name",
3: "version",
4: "s3_storage_param",
+ 5: "hdfs_storage_param",
}
func (p *TStorageResource) IsSetId() bool {
@@ -3278,6 +3966,10 @@ func (p *TStorageResource) IsSetS3StorageParam() bool {
return p.S3StorageParam != nil
}
+func (p *TStorageResource) IsSetHdfsStorageParam() bool {
+ return p.HdfsStorageParam != nil
+}
+
func (p *TStorageResource) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -3302,47 +3994,46 @@ func (p *TStorageResource) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRING {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 5:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField5(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -3368,37 +4059,52 @@ ReadStructEndError:
}
func (p *TStorageResource) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.Id = &v
+ _field = &v
}
+ p.Id = _field
return nil
}
-
func (p *TStorageResource) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Name = &v
+ _field = &v
}
+ p.Name = _field
return nil
}
-
func (p *TStorageResource) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.Version = &v
+ _field = &v
}
+ p.Version = _field
return nil
}
-
func (p *TStorageResource) ReadField4(iprot thrift.TProtocol) error {
- p.S3StorageParam = NewTS3StorageParam()
- if err := p.S3StorageParam.Read(iprot); err != nil {
+ _field := NewTS3StorageParam()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.S3StorageParam = _field
+ return nil
+}
+func (p *TStorageResource) ReadField5(iprot thrift.TProtocol) error {
+ _field := plannodes.NewTHdfsParams()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.HdfsStorageParam = _field
return nil
}
@@ -3424,7 +4130,10 @@ func (p *TStorageResource) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
+ if err = p.writeField5(oprot); err != nil {
+ fieldId = 5
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -3519,11 +4228,31 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
}
+func (p *TStorageResource) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetHdfsStorageParam() {
+ if err = oprot.WriteFieldBegin("hdfs_storage_param", thrift.STRUCT, 5); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.HdfsStorageParam.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err)
+}
+
func (p *TStorageResource) String() string {
if p == nil {
return ""
}
return fmt.Sprintf("TStorageResource(%+v)", *p)
+
}
func (p *TStorageResource) DeepEqual(ano *TStorageResource) bool {
@@ -3544,6 +4273,9 @@ func (p *TStorageResource) DeepEqual(ano *TStorageResource) bool {
if !p.Field4DeepEqual(ano.S3StorageParam) {
return false
}
+ if !p.Field5DeepEqual(ano.HdfsStorageParam) {
+ return false
+ }
return true
}
@@ -3590,6 +4322,13 @@ func (p *TStorageResource) Field4DeepEqual(src *TS3StorageParam) bool {
}
return true
}
+func (p *TStorageResource) Field5DeepEqual(src *plannodes.THdfsParams) bool {
+
+ if !p.HdfsStorageParam.DeepEqual(src) {
+ return false
+ }
+ return true
+}
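// A minimal usage sketch for the new optional field above, assuming it lives in
// the same kitex-generated package and that plannodes is imported as in this file.
// Optional fields are pointers: nil means "unset", and writeField5 only emits
// hdfs_storage_param when IsSetHdfsStorageParam() reports true.
func exampleStorageResourceHdfs() {
	res := NewTStorageResource()
	id := int64(10001) // hypothetical resource id
	res.SetId(&id)

	_ = res.IsSetHdfsStorageParam() // false: nothing set yet, the field is skipped on write

	res.SetHdfsStorageParam(plannodes.NewTHdfsParams())
	if res.IsSetHdfsStorageParam() {
		_ = res.GetHdfsStorageParam() // non-nil *plannodes.THdfsParams
	}
}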
type TPushStoragePolicyReq struct {
StoragePolicy []*TStoragePolicy `thrift:"storage_policy,1,optional" frugal:"1,optional,list" json:"storage_policy,omitempty"`
@@ -3602,7 +4341,6 @@ func NewTPushStoragePolicyReq() *TPushStoragePolicyReq {
}
func (p *TPushStoragePolicyReq) InitDefault() {
- *p = TPushStoragePolicyReq{}
}
var TPushStoragePolicyReq_StoragePolicy_DEFAULT []*TStoragePolicy
@@ -3683,37 +4421,30 @@ func (p *TPushStoragePolicyReq) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.LIST {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.LIST {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -3743,48 +4474,55 @@ func (p *TPushStoragePolicyReq) ReadField1(iprot thrift.TProtocol) error {
if err != nil {
return err
}
- p.StoragePolicy = make([]*TStoragePolicy, 0, size)
+ _field := make([]*TStoragePolicy, 0, size)
+ values := make([]TStoragePolicy, size)
for i := 0; i < size; i++ {
- _elem := NewTStoragePolicy()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.StoragePolicy = append(p.StoragePolicy, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.StoragePolicy = _field
return nil
}
-
func (p *TPushStoragePolicyReq) ReadField2(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.Resource = make([]*TStorageResource, 0, size)
+ _field := make([]*TStorageResource, 0, size)
+ values := make([]TStorageResource, size)
for i := 0; i < size; i++ {
- _elem := NewTStorageResource()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.Resource = append(p.Resource, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Resource = _field
return nil
}
-
func (p *TPushStoragePolicyReq) ReadField3(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.DroppedStoragePolicy = make([]int64, 0, size)
+ _field := make([]int64, 0, size)
for i := 0; i < size; i++ {
+
var _elem int64
if v, err := iprot.ReadI64(); err != nil {
return err
@@ -3792,11 +4530,12 @@ func (p *TPushStoragePolicyReq) ReadField3(iprot thrift.TProtocol) error {
_elem = v
}
- p.DroppedStoragePolicy = append(p.DroppedStoragePolicy, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.DroppedStoragePolicy = _field
return nil
}
@@ -3818,7 +4557,6 @@ func (p *TPushStoragePolicyReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 3
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -3923,6 +4661,7 @@ func (p *TPushStoragePolicyReq) String() string {
return ""
}
return fmt.Sprintf("TPushStoragePolicyReq(%+v)", *p)
+
}
func (p *TPushStoragePolicyReq) DeepEqual(ano *TPushStoragePolicyReq) bool {
@@ -3983,93 +4722,19 @@ func (p *TPushStoragePolicyReq) Field3DeepEqual(src []int64) bool {
return true
}
-type TBinlogConfig struct {
- Enable *bool `thrift:"enable,1,optional" frugal:"1,optional,bool" json:"enable,omitempty"`
- TtlSeconds *int64 `thrift:"ttl_seconds,2,optional" frugal:"2,optional,i64" json:"ttl_seconds,omitempty"`
- MaxBytes *int64 `thrift:"max_bytes,3,optional" frugal:"3,optional,i64" json:"max_bytes,omitempty"`
- MaxHistoryNums *int64 `thrift:"max_history_nums,4,optional" frugal:"4,optional,i64" json:"max_history_nums,omitempty"`
-}
-
-func NewTBinlogConfig() *TBinlogConfig {
- return &TBinlogConfig{}
-}
-
-func (p *TBinlogConfig) InitDefault() {
- *p = TBinlogConfig{}
-}
-
-var TBinlogConfig_Enable_DEFAULT bool
-
-func (p *TBinlogConfig) GetEnable() (v bool) {
- if !p.IsSetEnable() {
- return TBinlogConfig_Enable_DEFAULT
- }
- return *p.Enable
-}
-
-var TBinlogConfig_TtlSeconds_DEFAULT int64
-
-func (p *TBinlogConfig) GetTtlSeconds() (v int64) {
- if !p.IsSetTtlSeconds() {
- return TBinlogConfig_TtlSeconds_DEFAULT
- }
- return *p.TtlSeconds
-}
-
-var TBinlogConfig_MaxBytes_DEFAULT int64
-
-func (p *TBinlogConfig) GetMaxBytes() (v int64) {
- if !p.IsSetMaxBytes() {
- return TBinlogConfig_MaxBytes_DEFAULT
- }
- return *p.MaxBytes
-}
-
-var TBinlogConfig_MaxHistoryNums_DEFAULT int64
-
-func (p *TBinlogConfig) GetMaxHistoryNums() (v int64) {
- if !p.IsSetMaxHistoryNums() {
- return TBinlogConfig_MaxHistoryNums_DEFAULT
- }
- return *p.MaxHistoryNums
-}
-func (p *TBinlogConfig) SetEnable(val *bool) {
- p.Enable = val
-}
-func (p *TBinlogConfig) SetTtlSeconds(val *int64) {
- p.TtlSeconds = val
-}
-func (p *TBinlogConfig) SetMaxBytes(val *int64) {
- p.MaxBytes = val
-}
-func (p *TBinlogConfig) SetMaxHistoryNums(val *int64) {
- p.MaxHistoryNums = val
-}
-
-var fieldIDToName_TBinlogConfig = map[int16]string{
- 1: "enable",
- 2: "ttl_seconds",
- 3: "max_bytes",
- 4: "max_history_nums",
-}
-
-func (p *TBinlogConfig) IsSetEnable() bool {
- return p.Enable != nil
+type TCleanTrashReq struct {
}
-func (p *TBinlogConfig) IsSetTtlSeconds() bool {
- return p.TtlSeconds != nil
+func NewTCleanTrashReq() *TCleanTrashReq {
+ return &TCleanTrashReq{}
}
-func (p *TBinlogConfig) IsSetMaxBytes() bool {
- return p.MaxBytes != nil
+func (p *TCleanTrashReq) InitDefault() {
}
-func (p *TBinlogConfig) IsSetMaxHistoryNums() bool {
- return p.MaxHistoryNums != nil
-}
+var fieldIDToName_TCleanTrashReq = map[int16]string{}
-func (p *TBinlogConfig) Read(iprot thrift.TProtocol) (err error) {
+func (p *TCleanTrashReq) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -4086,54 +4751,132 @@ func (p *TBinlogConfig) Read(iprot thrift.TProtocol) (err error) {
if fieldTypeId == thrift.STOP {
break
}
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldTypeError
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.BOOL {
- if err = p.ReadField1(iprot); err != nil {
- goto ReadFieldError
- }
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- case 2:
- if fieldTypeId == thrift.I64 {
- if err = p.ReadField2(iprot); err != nil {
- goto ReadFieldError
- }
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- case 3:
- if fieldTypeId == thrift.I64 {
- if err = p.ReadField3(iprot); err != nil {
- goto ReadFieldError
- }
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- case 4:
- if fieldTypeId == thrift.I64 {
- if err = p.ReadField4(iprot); err != nil {
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+SkipFieldTypeError:
+ return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TCleanTrashReq) Write(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteStructBegin("TCleanTrashReq"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TCleanTrashReq) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TCleanTrashReq(%+v)", *p)
+
+}
+
+func (p *TCleanTrashReq) DeepEqual(ano *TCleanTrashReq) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ return true
+}
+
+type TCleanUDFCacheReq struct {
+ FunctionSignature *string `thrift:"function_signature,1,optional" frugal:"1,optional,string" json:"function_signature,omitempty"`
+}
+
+func NewTCleanUDFCacheReq() *TCleanUDFCacheReq {
+ return &TCleanUDFCacheReq{}
+}
+
+func (p *TCleanUDFCacheReq) InitDefault() {
+}
+
+var TCleanUDFCacheReq_FunctionSignature_DEFAULT string
+
+func (p *TCleanUDFCacheReq) GetFunctionSignature() (v string) {
+ if !p.IsSetFunctionSignature() {
+ return TCleanUDFCacheReq_FunctionSignature_DEFAULT
+ }
+ return *p.FunctionSignature
+}
+func (p *TCleanUDFCacheReq) SetFunctionSignature(val *string) {
+ p.FunctionSignature = val
+}
+
+var fieldIDToName_TCleanUDFCacheReq = map[int16]string{
+ 1: "function_signature",
+}
+
+func (p *TCleanUDFCacheReq) IsSetFunctionSignature() bool {
+ return p.FunctionSignature != nil
+}
+
+func (p *TCleanUDFCacheReq) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -4148,7 +4891,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBinlogConfig[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCleanUDFCacheReq[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -4158,45 +4901,21 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *TBinlogConfig) ReadField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(); err != nil {
- return err
- } else {
- p.Enable = &v
- }
- return nil
-}
-
-func (p *TBinlogConfig) ReadField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- p.TtlSeconds = &v
- }
- return nil
-}
-
-func (p *TBinlogConfig) ReadField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- p.MaxBytes = &v
- }
- return nil
-}
+func (p *TCleanUDFCacheReq) ReadField1(iprot thrift.TProtocol) error {
-func (p *TBinlogConfig) ReadField4(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.MaxHistoryNums = &v
+ _field = &v
}
+ p.FunctionSignature = _field
return nil
}
-func (p *TBinlogConfig) Write(oprot thrift.TProtocol) (err error) {
+func (p *TCleanUDFCacheReq) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("TBinlogConfig"); err != nil {
+ if err = oprot.WriteStructBegin("TCleanUDFCacheReq"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -4204,19 +4923,6 @@ func (p *TBinlogConfig) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
- if err = p.writeField2(oprot); err != nil {
- fieldId = 2
- goto WriteFieldError
- }
- if err = p.writeField3(oprot); err != nil {
- fieldId = 3
- goto WriteFieldError
- }
- if err = p.writeField4(oprot); err != nil {
- fieldId = 4
- goto WriteFieldError
- }
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -4235,12 +4941,12 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *TBinlogConfig) writeField1(oprot thrift.TProtocol) (err error) {
- if p.IsSetEnable() {
- if err = oprot.WriteFieldBegin("enable", thrift.BOOL, 1); err != nil {
+func (p *TCleanUDFCacheReq) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetFunctionSignature() {
+ if err = oprot.WriteFieldBegin("function_signature", thrift.STRING, 1); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteBool(*p.Enable); err != nil {
+ if err := oprot.WriteString(*p.FunctionSignature); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -4254,428 +4960,800 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *TBinlogConfig) writeField2(oprot thrift.TProtocol) (err error) {
- if p.IsSetTtlSeconds() {
- if err = oprot.WriteFieldBegin("ttl_seconds", thrift.I64, 2); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(*p.TtlSeconds); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
- }
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
-}
-
-func (p *TBinlogConfig) writeField3(oprot thrift.TProtocol) (err error) {
- if p.IsSetMaxBytes() {
- if err = oprot.WriteFieldBegin("max_bytes", thrift.I64, 3); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(*p.MaxBytes); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
- }
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
-}
-
-func (p *TBinlogConfig) writeField4(oprot thrift.TProtocol) (err error) {
- if p.IsSetMaxHistoryNums() {
- if err = oprot.WriteFieldBegin("max_history_nums", thrift.I64, 4); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(*p.MaxHistoryNums); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
- }
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
-}
-
-func (p *TBinlogConfig) String() string {
+func (p *TCleanUDFCacheReq) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("TBinlogConfig(%+v)", *p)
+ return fmt.Sprintf("TCleanUDFCacheReq(%+v)", *p)
+
}
-func (p *TBinlogConfig) DeepEqual(ano *TBinlogConfig) bool {
+func (p *TCleanUDFCacheReq) DeepEqual(ano *TCleanUDFCacheReq) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.Enable) {
- return false
- }
- if !p.Field2DeepEqual(ano.TtlSeconds) {
- return false
- }
- if !p.Field3DeepEqual(ano.MaxBytes) {
- return false
- }
- if !p.Field4DeepEqual(ano.MaxHistoryNums) {
+ if !p.Field1DeepEqual(ano.FunctionSignature) {
return false
}
return true
}
-func (p *TBinlogConfig) Field1DeepEqual(src *bool) bool {
+func (p *TCleanUDFCacheReq) Field1DeepEqual(src *string) bool {
- if p.Enable == src {
+ if p.FunctionSignature == src {
return true
- } else if p.Enable == nil || src == nil {
+ } else if p.FunctionSignature == nil || src == nil {
return false
}
- if *p.Enable != *src {
+ if strings.Compare(*p.FunctionSignature, *src) != 0 {
return false
}
return true
}
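// A minimal sketch of the optional string field on TCleanUDFCacheReq, assuming
// the surrounding generated package; the signature value below is purely
// hypothetical. As with the other optional fields, a nil pointer means the
// field is omitted on the wire (writeField1 is guarded by IsSetFunctionSignature).
func exampleCleanUDFCacheReq() {
	req := NewTCleanUDFCacheReq()
	sig := "my_udf(INT, STRING)" // hypothetical UDF signature
	req.SetFunctionSignature(&sig)
	if req.IsSetFunctionSignature() {
		_ = req.GetFunctionSignature() // returns the dereferenced value, "" when unset
	}
}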
-func (p *TBinlogConfig) Field2DeepEqual(src *int64) bool {
- if p.TtlSeconds == src {
- return true
- } else if p.TtlSeconds == nil || src == nil {
- return false
- }
- if *p.TtlSeconds != *src {
- return false
- }
- return true
+type TBinlogConfig struct {
+ Enable *bool `thrift:"enable,1,optional" frugal:"1,optional,bool" json:"enable,omitempty"`
+ TtlSeconds *int64 `thrift:"ttl_seconds,2,optional" frugal:"2,optional,i64" json:"ttl_seconds,omitempty"`
+ MaxBytes *int64 `thrift:"max_bytes,3,optional" frugal:"3,optional,i64" json:"max_bytes,omitempty"`
+ MaxHistoryNums *int64 `thrift:"max_history_nums,4,optional" frugal:"4,optional,i64" json:"max_history_nums,omitempty"`
}
-func (p *TBinlogConfig) Field3DeepEqual(src *int64) bool {
- if p.MaxBytes == src {
- return true
- } else if p.MaxBytes == nil || src == nil {
- return false
- }
- if *p.MaxBytes != *src {
- return false
- }
- return true
+func NewTBinlogConfig() *TBinlogConfig {
+ return &TBinlogConfig{}
}
-func (p *TBinlogConfig) Field4DeepEqual(src *int64) bool {
- if p.MaxHistoryNums == src {
- return true
- } else if p.MaxHistoryNums == nil || src == nil {
- return false
- }
- if *p.MaxHistoryNums != *src {
- return false
- }
- return true
+func (p *TBinlogConfig) InitDefault() {
}
-type TCreateTabletReq struct {
- TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"`
- TabletSchema *TTabletSchema `thrift:"tablet_schema,2,required" frugal:"2,required,TTabletSchema" json:"tablet_schema"`
- Version *types.TVersion `thrift:"version,3,optional" frugal:"3,optional,i64" json:"version,omitempty"`
- VersionHash *types.TVersionHash `thrift:"version_hash,4,optional" frugal:"4,optional,i64" json:"version_hash,omitempty"`
- StorageMedium *types.TStorageMedium `thrift:"storage_medium,5,optional" frugal:"5,optional,TStorageMedium" json:"storage_medium,omitempty"`
- InRestoreMode *bool `thrift:"in_restore_mode,6,optional" frugal:"6,optional,bool" json:"in_restore_mode,omitempty"`
- BaseTabletId *types.TTabletId `thrift:"base_tablet_id,7,optional" frugal:"7,optional,i64" json:"base_tablet_id,omitempty"`
- BaseSchemaHash *types.TSchemaHash `thrift:"base_schema_hash,8,optional" frugal:"8,optional,i32" json:"base_schema_hash,omitempty"`
- TableId *int64 `thrift:"table_id,9,optional" frugal:"9,optional,i64" json:"table_id,omitempty"`
- PartitionId *int64 `thrift:"partition_id,10,optional" frugal:"10,optional,i64" json:"partition_id,omitempty"`
- AllocationTerm *int64 `thrift:"allocation_term,11,optional" frugal:"11,optional,i64" json:"allocation_term,omitempty"`
- IsEcoMode *bool `thrift:"is_eco_mode,12,optional" frugal:"12,optional,bool" json:"is_eco_mode,omitempty"`
- StorageFormat *TStorageFormat `thrift:"storage_format,13,optional" frugal:"13,optional,TStorageFormat" json:"storage_format,omitempty"`
- TabletType *TTabletType `thrift:"tablet_type,14,optional" frugal:"14,optional,TTabletType" json:"tablet_type,omitempty"`
- CompressionType TCompressionType `thrift:"compression_type,16,optional" frugal:"16,optional,TCompressionType" json:"compression_type,omitempty"`
- ReplicaId types.TReplicaId `thrift:"replica_id,17,optional" frugal:"17,optional,i64" json:"replica_id,omitempty"`
- EnableUniqueKeyMergeOnWrite bool `thrift:"enable_unique_key_merge_on_write,19,optional" frugal:"19,optional,bool" json:"enable_unique_key_merge_on_write,omitempty"`
- StoragePolicyId *int64 `thrift:"storage_policy_id,20,optional" frugal:"20,optional,i64" json:"storage_policy_id,omitempty"`
- BinlogConfig *TBinlogConfig `thrift:"binlog_config,21,optional" frugal:"21,optional,TBinlogConfig" json:"binlog_config,omitempty"`
- CompactionPolicy string `thrift:"compaction_policy,22,optional" frugal:"22,optional,string" json:"compaction_policy,omitempty"`
- TimeSeriesCompactionGoalSizeMbytes int64 `thrift:"time_series_compaction_goal_size_mbytes,23,optional" frugal:"23,optional,i64" json:"time_series_compaction_goal_size_mbytes,omitempty"`
- TimeSeriesCompactionFileCountThreshold int64 `thrift:"time_series_compaction_file_count_threshold,24,optional" frugal:"24,optional,i64" json:"time_series_compaction_file_count_threshold,omitempty"`
- TimeSeriesCompactionTimeThresholdSeconds int64 `thrift:"time_series_compaction_time_threshold_seconds,25,optional" frugal:"25,optional,i64" json:"time_series_compaction_time_threshold_seconds,omitempty"`
-}
-
-func NewTCreateTabletReq() *TCreateTabletReq {
- return &TCreateTabletReq{
+var TBinlogConfig_Enable_DEFAULT bool
- CompressionType: TCompressionType_LZ4F,
- ReplicaId: 0,
- EnableUniqueKeyMergeOnWrite: false,
- CompactionPolicy: "size_based",
- TimeSeriesCompactionGoalSizeMbytes: 1024,
- TimeSeriesCompactionFileCountThreshold: 2000,
- TimeSeriesCompactionTimeThresholdSeconds: 3600,
+func (p *TBinlogConfig) GetEnable() (v bool) {
+ if !p.IsSetEnable() {
+ return TBinlogConfig_Enable_DEFAULT
}
+ return *p.Enable
}
-func (p *TCreateTabletReq) InitDefault() {
- *p = TCreateTabletReq{
+var TBinlogConfig_TtlSeconds_DEFAULT int64
- CompressionType: TCompressionType_LZ4F,
- ReplicaId: 0,
- EnableUniqueKeyMergeOnWrite: false,
- CompactionPolicy: "size_based",
- TimeSeriesCompactionGoalSizeMbytes: 1024,
- TimeSeriesCompactionFileCountThreshold: 2000,
- TimeSeriesCompactionTimeThresholdSeconds: 3600,
+func (p *TBinlogConfig) GetTtlSeconds() (v int64) {
+ if !p.IsSetTtlSeconds() {
+ return TBinlogConfig_TtlSeconds_DEFAULT
}
+ return *p.TtlSeconds
}
-func (p *TCreateTabletReq) GetTabletId() (v types.TTabletId) {
- return p.TabletId
-}
-
-var TCreateTabletReq_TabletSchema_DEFAULT *TTabletSchema
+var TBinlogConfig_MaxBytes_DEFAULT int64
-func (p *TCreateTabletReq) GetTabletSchema() (v *TTabletSchema) {
- if !p.IsSetTabletSchema() {
- return TCreateTabletReq_TabletSchema_DEFAULT
+func (p *TBinlogConfig) GetMaxBytes() (v int64) {
+ if !p.IsSetMaxBytes() {
+ return TBinlogConfig_MaxBytes_DEFAULT
}
- return p.TabletSchema
+ return *p.MaxBytes
}
-var TCreateTabletReq_Version_DEFAULT types.TVersion
+var TBinlogConfig_MaxHistoryNums_DEFAULT int64
-func (p *TCreateTabletReq) GetVersion() (v types.TVersion) {
- if !p.IsSetVersion() {
- return TCreateTabletReq_Version_DEFAULT
+func (p *TBinlogConfig) GetMaxHistoryNums() (v int64) {
+ if !p.IsSetMaxHistoryNums() {
+ return TBinlogConfig_MaxHistoryNums_DEFAULT
}
- return *p.Version
+ return *p.MaxHistoryNums
}
-
-var TCreateTabletReq_VersionHash_DEFAULT types.TVersionHash
-
-func (p *TCreateTabletReq) GetVersionHash() (v types.TVersionHash) {
- if !p.IsSetVersionHash() {
- return TCreateTabletReq_VersionHash_DEFAULT
- }
- return *p.VersionHash
+func (p *TBinlogConfig) SetEnable(val *bool) {
+ p.Enable = val
}
-
-var TCreateTabletReq_StorageMedium_DEFAULT types.TStorageMedium
-
-func (p *TCreateTabletReq) GetStorageMedium() (v types.TStorageMedium) {
- if !p.IsSetStorageMedium() {
- return TCreateTabletReq_StorageMedium_DEFAULT
- }
- return *p.StorageMedium
+func (p *TBinlogConfig) SetTtlSeconds(val *int64) {
+ p.TtlSeconds = val
}
-
-var TCreateTabletReq_InRestoreMode_DEFAULT bool
-
-func (p *TCreateTabletReq) GetInRestoreMode() (v bool) {
- if !p.IsSetInRestoreMode() {
- return TCreateTabletReq_InRestoreMode_DEFAULT
- }
- return *p.InRestoreMode
+func (p *TBinlogConfig) SetMaxBytes(val *int64) {
+ p.MaxBytes = val
}
-
-var TCreateTabletReq_BaseTabletId_DEFAULT types.TTabletId
-
-func (p *TCreateTabletReq) GetBaseTabletId() (v types.TTabletId) {
- if !p.IsSetBaseTabletId() {
- return TCreateTabletReq_BaseTabletId_DEFAULT
- }
- return *p.BaseTabletId
+func (p *TBinlogConfig) SetMaxHistoryNums(val *int64) {
+ p.MaxHistoryNums = val
}
-var TCreateTabletReq_BaseSchemaHash_DEFAULT types.TSchemaHash
-
-func (p *TCreateTabletReq) GetBaseSchemaHash() (v types.TSchemaHash) {
- if !p.IsSetBaseSchemaHash() {
- return TCreateTabletReq_BaseSchemaHash_DEFAULT
- }
- return *p.BaseSchemaHash
+var fieldIDToName_TBinlogConfig = map[int16]string{
+ 1: "enable",
+ 2: "ttl_seconds",
+ 3: "max_bytes",
+ 4: "max_history_nums",
}
-var TCreateTabletReq_TableId_DEFAULT int64
-
-func (p *TCreateTabletReq) GetTableId() (v int64) {
- if !p.IsSetTableId() {
- return TCreateTabletReq_TableId_DEFAULT
- }
- return *p.TableId
+func (p *TBinlogConfig) IsSetEnable() bool {
+ return p.Enable != nil
}
-var TCreateTabletReq_PartitionId_DEFAULT int64
-
-func (p *TCreateTabletReq) GetPartitionId() (v int64) {
- if !p.IsSetPartitionId() {
- return TCreateTabletReq_PartitionId_DEFAULT
- }
- return *p.PartitionId
+func (p *TBinlogConfig) IsSetTtlSeconds() bool {
+ return p.TtlSeconds != nil
}
-var TCreateTabletReq_AllocationTerm_DEFAULT int64
-
-func (p *TCreateTabletReq) GetAllocationTerm() (v int64) {
- if !p.IsSetAllocationTerm() {
- return TCreateTabletReq_AllocationTerm_DEFAULT
- }
- return *p.AllocationTerm
+func (p *TBinlogConfig) IsSetMaxBytes() bool {
+ return p.MaxBytes != nil
}
-var TCreateTabletReq_IsEcoMode_DEFAULT bool
-
-func (p *TCreateTabletReq) GetIsEcoMode() (v bool) {
- if !p.IsSetIsEcoMode() {
- return TCreateTabletReq_IsEcoMode_DEFAULT
- }
- return *p.IsEcoMode
+func (p *TBinlogConfig) IsSetMaxHistoryNums() bool {
+ return p.MaxHistoryNums != nil
}
-var TCreateTabletReq_StorageFormat_DEFAULT TStorageFormat
+func (p *TBinlogConfig) Read(iprot thrift.TProtocol) (err error) {
-func (p *TCreateTabletReq) GetStorageFormat() (v TStorageFormat) {
- if !p.IsSetStorageFormat() {
- return TCreateTabletReq_StorageFormat_DEFAULT
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
}
- return *p.StorageFormat
-}
-var TCreateTabletReq_TabletType_DEFAULT TTabletType
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
-func (p *TCreateTabletReq) GetTabletType() (v TTabletType) {
- if !p.IsSetTabletType() {
- return TCreateTabletReq_TabletType_DEFAULT
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
}
- return *p.TabletType
-}
-var TCreateTabletReq_CompressionType_DEFAULT TCompressionType = TCompressionType_LZ4F
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBinlogConfig[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
-func (p *TCreateTabletReq) GetCompressionType() (v TCompressionType) {
- if !p.IsSetCompressionType() {
- return TCreateTabletReq_CompressionType_DEFAULT
- }
- return p.CompressionType
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-var TCreateTabletReq_ReplicaId_DEFAULT types.TReplicaId = 0
+func (p *TBinlogConfig) ReadField1(iprot thrift.TProtocol) error {
-func (p *TCreateTabletReq) GetReplicaId() (v types.TReplicaId) {
- if !p.IsSetReplicaId() {
- return TCreateTabletReq_ReplicaId_DEFAULT
+ var _field *bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = &v
}
- return p.ReplicaId
+ p.Enable = _field
+ return nil
}
+func (p *TBinlogConfig) ReadField2(iprot thrift.TProtocol) error {
-var TCreateTabletReq_EnableUniqueKeyMergeOnWrite_DEFAULT bool = false
-
-func (p *TCreateTabletReq) GetEnableUniqueKeyMergeOnWrite() (v bool) {
- if !p.IsSetEnableUniqueKeyMergeOnWrite() {
- return TCreateTabletReq_EnableUniqueKeyMergeOnWrite_DEFAULT
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
}
- return p.EnableUniqueKeyMergeOnWrite
+ p.TtlSeconds = _field
+ return nil
}
+func (p *TBinlogConfig) ReadField3(iprot thrift.TProtocol) error {
-var TCreateTabletReq_StoragePolicyId_DEFAULT int64
-
-func (p *TCreateTabletReq) GetStoragePolicyId() (v int64) {
- if !p.IsSetStoragePolicyId() {
- return TCreateTabletReq_StoragePolicyId_DEFAULT
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
}
- return *p.StoragePolicyId
+ p.MaxBytes = _field
+ return nil
}
+func (p *TBinlogConfig) ReadField4(iprot thrift.TProtocol) error {
-var TCreateTabletReq_BinlogConfig_DEFAULT *TBinlogConfig
-
-func (p *TCreateTabletReq) GetBinlogConfig() (v *TBinlogConfig) {
- if !p.IsSetBinlogConfig() {
- return TCreateTabletReq_BinlogConfig_DEFAULT
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
}
- return p.BinlogConfig
+ p.MaxHistoryNums = _field
+ return nil
}
-var TCreateTabletReq_CompactionPolicy_DEFAULT string = "size_based"
-
-func (p *TCreateTabletReq) GetCompactionPolicy() (v string) {
- if !p.IsSetCompactionPolicy() {
- return TCreateTabletReq_CompactionPolicy_DEFAULT
+func (p *TBinlogConfig) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TBinlogConfig"); err != nil {
+ goto WriteStructBeginError
}
- return p.CompactionPolicy
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-var TCreateTabletReq_TimeSeriesCompactionGoalSizeMbytes_DEFAULT int64 = 1024
+func (p *TBinlogConfig) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetEnable() {
+ if err = oprot.WriteFieldBegin("enable", thrift.BOOL, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(*p.Enable); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
-func (p *TCreateTabletReq) GetTimeSeriesCompactionGoalSizeMbytes() (v int64) {
- if !p.IsSetTimeSeriesCompactionGoalSizeMbytes() {
- return TCreateTabletReq_TimeSeriesCompactionGoalSizeMbytes_DEFAULT
+func (p *TBinlogConfig) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTtlSeconds() {
+ if err = oprot.WriteFieldBegin("ttl_seconds", thrift.I64, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.TtlSeconds); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
}
- return p.TimeSeriesCompactionGoalSizeMbytes
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
}
-var TCreateTabletReq_TimeSeriesCompactionFileCountThreshold_DEFAULT int64 = 2000
+func (p *TBinlogConfig) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetMaxBytes() {
+ if err = oprot.WriteFieldBegin("max_bytes", thrift.I64, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.MaxBytes); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
-func (p *TCreateTabletReq) GetTimeSeriesCompactionFileCountThreshold() (v int64) {
- if !p.IsSetTimeSeriesCompactionFileCountThreshold() {
- return TCreateTabletReq_TimeSeriesCompactionFileCountThreshold_DEFAULT
+func (p *TBinlogConfig) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetMaxHistoryNums() {
+ if err = oprot.WriteFieldBegin("max_history_nums", thrift.I64, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.MaxHistoryNums); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
}
- return p.TimeSeriesCompactionFileCountThreshold
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
}
-var TCreateTabletReq_TimeSeriesCompactionTimeThresholdSeconds_DEFAULT int64 = 3600
+func (p *TBinlogConfig) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TBinlogConfig(%+v)", *p)
-func (p *TCreateTabletReq) GetTimeSeriesCompactionTimeThresholdSeconds() (v int64) {
- if !p.IsSetTimeSeriesCompactionTimeThresholdSeconds() {
- return TCreateTabletReq_TimeSeriesCompactionTimeThresholdSeconds_DEFAULT
+}
+
+func (p *TBinlogConfig) DeepEqual(ano *TBinlogConfig) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
}
- return p.TimeSeriesCompactionTimeThresholdSeconds
+ if !p.Field1DeepEqual(ano.Enable) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.TtlSeconds) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.MaxBytes) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.MaxHistoryNums) {
+ return false
+ }
+ return true
}
-func (p *TCreateTabletReq) SetTabletId(val types.TTabletId) {
- p.TabletId = val
+
+func (p *TBinlogConfig) Field1DeepEqual(src *bool) bool {
+
+ if p.Enable == src {
+ return true
+ } else if p.Enable == nil || src == nil {
+ return false
+ }
+ if *p.Enable != *src {
+ return false
+ }
+ return true
}
-func (p *TCreateTabletReq) SetTabletSchema(val *TTabletSchema) {
- p.TabletSchema = val
+func (p *TBinlogConfig) Field2DeepEqual(src *int64) bool {
+
+ if p.TtlSeconds == src {
+ return true
+ } else if p.TtlSeconds == nil || src == nil {
+ return false
+ }
+ if *p.TtlSeconds != *src {
+ return false
+ }
+ return true
}
-func (p *TCreateTabletReq) SetVersion(val *types.TVersion) {
- p.Version = val
+func (p *TBinlogConfig) Field3DeepEqual(src *int64) bool {
+
+ if p.MaxBytes == src {
+ return true
+ } else if p.MaxBytes == nil || src == nil {
+ return false
+ }
+ if *p.MaxBytes != *src {
+ return false
+ }
+ return true
}
-func (p *TCreateTabletReq) SetVersionHash(val *types.TVersionHash) {
- p.VersionHash = val
+func (p *TBinlogConfig) Field4DeepEqual(src *int64) bool {
+
+ if p.MaxHistoryNums == src {
+ return true
+ } else if p.MaxHistoryNums == nil || src == nil {
+ return false
+ }
+ if *p.MaxHistoryNums != *src {
+ return false
+ }
+ return true
}
-func (p *TCreateTabletReq) SetStorageMedium(val *types.TStorageMedium) {
- p.StorageMedium = val
+
+type TCreateTabletReq struct {
+ TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"`
+ TabletSchema *TTabletSchema `thrift:"tablet_schema,2,required" frugal:"2,required,TTabletSchema" json:"tablet_schema"`
+ Version *types.TVersion `thrift:"version,3,optional" frugal:"3,optional,i64" json:"version,omitempty"`
+ VersionHash *types.TVersionHash `thrift:"version_hash,4,optional" frugal:"4,optional,i64" json:"version_hash,omitempty"`
+ StorageMedium *types.TStorageMedium `thrift:"storage_medium,5,optional" frugal:"5,optional,TStorageMedium" json:"storage_medium,omitempty"`
+ InRestoreMode *bool `thrift:"in_restore_mode,6,optional" frugal:"6,optional,bool" json:"in_restore_mode,omitempty"`
+ BaseTabletId *types.TTabletId `thrift:"base_tablet_id,7,optional" frugal:"7,optional,i64" json:"base_tablet_id,omitempty"`
+ BaseSchemaHash *types.TSchemaHash `thrift:"base_schema_hash,8,optional" frugal:"8,optional,i32" json:"base_schema_hash,omitempty"`
+ TableId *int64 `thrift:"table_id,9,optional" frugal:"9,optional,i64" json:"table_id,omitempty"`
+ PartitionId *int64 `thrift:"partition_id,10,optional" frugal:"10,optional,i64" json:"partition_id,omitempty"`
+ AllocationTerm *int64 `thrift:"allocation_term,11,optional" frugal:"11,optional,i64" json:"allocation_term,omitempty"`
+ IsEcoMode *bool `thrift:"is_eco_mode,12,optional" frugal:"12,optional,bool" json:"is_eco_mode,omitempty"`
+ StorageFormat *TStorageFormat `thrift:"storage_format,13,optional" frugal:"13,optional,TStorageFormat" json:"storage_format,omitempty"`
+ TabletType *TTabletType `thrift:"tablet_type,14,optional" frugal:"14,optional,TTabletType" json:"tablet_type,omitempty"`
+ CompressionType TCompressionType `thrift:"compression_type,16,optional" frugal:"16,optional,TCompressionType" json:"compression_type,omitempty"`
+ ReplicaId types.TReplicaId `thrift:"replica_id,17,optional" frugal:"17,optional,i64" json:"replica_id,omitempty"`
+ EnableUniqueKeyMergeOnWrite bool `thrift:"enable_unique_key_merge_on_write,19,optional" frugal:"19,optional,bool" json:"enable_unique_key_merge_on_write,omitempty"`
+ StoragePolicyId *int64 `thrift:"storage_policy_id,20,optional" frugal:"20,optional,i64" json:"storage_policy_id,omitempty"`
+ BinlogConfig *TBinlogConfig `thrift:"binlog_config,21,optional" frugal:"21,optional,TBinlogConfig" json:"binlog_config,omitempty"`
+ CompactionPolicy string `thrift:"compaction_policy,22,optional" frugal:"22,optional,string" json:"compaction_policy,omitempty"`
+ TimeSeriesCompactionGoalSizeMbytes int64 `thrift:"time_series_compaction_goal_size_mbytes,23,optional" frugal:"23,optional,i64" json:"time_series_compaction_goal_size_mbytes,omitempty"`
+ TimeSeriesCompactionFileCountThreshold int64 `thrift:"time_series_compaction_file_count_threshold,24,optional" frugal:"24,optional,i64" json:"time_series_compaction_file_count_threshold,omitempty"`
+ TimeSeriesCompactionTimeThresholdSeconds int64 `thrift:"time_series_compaction_time_threshold_seconds,25,optional" frugal:"25,optional,i64" json:"time_series_compaction_time_threshold_seconds,omitempty"`
+ TimeSeriesCompactionEmptyRowsetsThreshold int64 `thrift:"time_series_compaction_empty_rowsets_threshold,26,optional" frugal:"26,optional,i64" json:"time_series_compaction_empty_rowsets_threshold,omitempty"`
+ TimeSeriesCompactionLevelThreshold int64 `thrift:"time_series_compaction_level_threshold,27,optional" frugal:"27,optional,i64" json:"time_series_compaction_level_threshold,omitempty"`
+ InvertedIndexStorageFormat TInvertedIndexStorageFormat `thrift:"inverted_index_storage_format,28,optional" frugal:"28,optional,TInvertedIndexStorageFormat" json:"inverted_index_storage_format,omitempty"`
+ InvertedIndexFileStorageFormat types.TInvertedIndexFileStorageFormat `thrift:"inverted_index_file_storage_format,29,optional" frugal:"29,optional,TInvertedIndexFileStorageFormat" json:"inverted_index_file_storage_format,omitempty"`
+ IsInMemory bool `thrift:"is_in_memory,1000,optional" frugal:"1000,optional,bool" json:"is_in_memory,omitempty"`
+ IsPersistent bool `thrift:"is_persistent,1001,optional" frugal:"1001,optional,bool" json:"is_persistent,omitempty"`
}
-func (p *TCreateTabletReq) SetInRestoreMode(val *bool) {
- p.InRestoreMode = val
+
+func NewTCreateTabletReq() *TCreateTabletReq {
+ return &TCreateTabletReq{
+
+ CompressionType: TCompressionType_LZ4F,
+ ReplicaId: 0,
+ EnableUniqueKeyMergeOnWrite: false,
+ CompactionPolicy: "size_based",
+ TimeSeriesCompactionGoalSizeMbytes: 1024,
+ TimeSeriesCompactionFileCountThreshold: 2000,
+ TimeSeriesCompactionTimeThresholdSeconds: 3600,
+ TimeSeriesCompactionEmptyRowsetsThreshold: 5,
+ TimeSeriesCompactionLevelThreshold: 1,
+ InvertedIndexStorageFormat: TInvertedIndexStorageFormat_DEFAULT,
+ InvertedIndexFileStorageFormat: types.TInvertedIndexFileStorageFormat_V2,
+ IsInMemory: false,
+ IsPersistent: false,
+ }
}
-func (p *TCreateTabletReq) SetBaseTabletId(val *types.TTabletId) {
- p.BaseTabletId = val
+
+func (p *TCreateTabletReq) InitDefault() {
+ p.CompressionType = TCompressionType_LZ4F
+ p.ReplicaId = 0
+ p.EnableUniqueKeyMergeOnWrite = false
+ p.CompactionPolicy = "size_based"
+ p.TimeSeriesCompactionGoalSizeMbytes = 1024
+ p.TimeSeriesCompactionFileCountThreshold = 2000
+ p.TimeSeriesCompactionTimeThresholdSeconds = 3600
+ p.TimeSeriesCompactionEmptyRowsetsThreshold = 5
+ p.TimeSeriesCompactionLevelThreshold = 1
+ p.InvertedIndexStorageFormat = TInvertedIndexStorageFormat_DEFAULT
+ p.InvertedIndexFileStorageFormat = types.TInvertedIndexFileStorageFormat_V2
+ p.IsInMemory = false
+ p.IsPersistent = false
}
-func (p *TCreateTabletReq) SetBaseSchemaHash(val *types.TSchemaHash) {
- p.BaseSchemaHash = val
+
+func (p *TCreateTabletReq) GetTabletId() (v types.TTabletId) {
+ return p.TabletId
}
-func (p *TCreateTabletReq) SetTableId(val *int64) {
- p.TableId = val
+
+var TCreateTabletReq_TabletSchema_DEFAULT *TTabletSchema
+
+func (p *TCreateTabletReq) GetTabletSchema() (v *TTabletSchema) {
+ if !p.IsSetTabletSchema() {
+ return TCreateTabletReq_TabletSchema_DEFAULT
+ }
+ return p.TabletSchema
}
-func (p *TCreateTabletReq) SetPartitionId(val *int64) {
- p.PartitionId = val
+
+var TCreateTabletReq_Version_DEFAULT types.TVersion
+
+func (p *TCreateTabletReq) GetVersion() (v types.TVersion) {
+ if !p.IsSetVersion() {
+ return TCreateTabletReq_Version_DEFAULT
+ }
+ return *p.Version
}
-func (p *TCreateTabletReq) SetAllocationTerm(val *int64) {
- p.AllocationTerm = val
+
+var TCreateTabletReq_VersionHash_DEFAULT types.TVersionHash
+
+func (p *TCreateTabletReq) GetVersionHash() (v types.TVersionHash) {
+ if !p.IsSetVersionHash() {
+ return TCreateTabletReq_VersionHash_DEFAULT
+ }
+ return *p.VersionHash
}
-func (p *TCreateTabletReq) SetIsEcoMode(val *bool) {
- p.IsEcoMode = val
+
+var TCreateTabletReq_StorageMedium_DEFAULT types.TStorageMedium
+
+func (p *TCreateTabletReq) GetStorageMedium() (v types.TStorageMedium) {
+ if !p.IsSetStorageMedium() {
+ return TCreateTabletReq_StorageMedium_DEFAULT
+ }
+ return *p.StorageMedium
+}
+
+var TCreateTabletReq_InRestoreMode_DEFAULT bool
+
+func (p *TCreateTabletReq) GetInRestoreMode() (v bool) {
+ if !p.IsSetInRestoreMode() {
+ return TCreateTabletReq_InRestoreMode_DEFAULT
+ }
+ return *p.InRestoreMode
+}
+
+var TCreateTabletReq_BaseTabletId_DEFAULT types.TTabletId
+
+func (p *TCreateTabletReq) GetBaseTabletId() (v types.TTabletId) {
+ if !p.IsSetBaseTabletId() {
+ return TCreateTabletReq_BaseTabletId_DEFAULT
+ }
+ return *p.BaseTabletId
+}
+
+var TCreateTabletReq_BaseSchemaHash_DEFAULT types.TSchemaHash
+
+func (p *TCreateTabletReq) GetBaseSchemaHash() (v types.TSchemaHash) {
+ if !p.IsSetBaseSchemaHash() {
+ return TCreateTabletReq_BaseSchemaHash_DEFAULT
+ }
+ return *p.BaseSchemaHash
+}
+
+var TCreateTabletReq_TableId_DEFAULT int64
+
+func (p *TCreateTabletReq) GetTableId() (v int64) {
+ if !p.IsSetTableId() {
+ return TCreateTabletReq_TableId_DEFAULT
+ }
+ return *p.TableId
+}
+
+var TCreateTabletReq_PartitionId_DEFAULT int64
+
+func (p *TCreateTabletReq) GetPartitionId() (v int64) {
+ if !p.IsSetPartitionId() {
+ return TCreateTabletReq_PartitionId_DEFAULT
+ }
+ return *p.PartitionId
+}
+
+var TCreateTabletReq_AllocationTerm_DEFAULT int64
+
+func (p *TCreateTabletReq) GetAllocationTerm() (v int64) {
+ if !p.IsSetAllocationTerm() {
+ return TCreateTabletReq_AllocationTerm_DEFAULT
+ }
+ return *p.AllocationTerm
+}
+
+var TCreateTabletReq_IsEcoMode_DEFAULT bool
+
+func (p *TCreateTabletReq) GetIsEcoMode() (v bool) {
+ if !p.IsSetIsEcoMode() {
+ return TCreateTabletReq_IsEcoMode_DEFAULT
+ }
+ return *p.IsEcoMode
+}
+
+var TCreateTabletReq_StorageFormat_DEFAULT TStorageFormat
+
+func (p *TCreateTabletReq) GetStorageFormat() (v TStorageFormat) {
+ if !p.IsSetStorageFormat() {
+ return TCreateTabletReq_StorageFormat_DEFAULT
+ }
+ return *p.StorageFormat
+}
+
+var TCreateTabletReq_TabletType_DEFAULT TTabletType
+
+func (p *TCreateTabletReq) GetTabletType() (v TTabletType) {
+ if !p.IsSetTabletType() {
+ return TCreateTabletReq_TabletType_DEFAULT
+ }
+ return *p.TabletType
+}
+
+var TCreateTabletReq_CompressionType_DEFAULT TCompressionType = TCompressionType_LZ4F
+
+func (p *TCreateTabletReq) GetCompressionType() (v TCompressionType) {
+ if !p.IsSetCompressionType() {
+ return TCreateTabletReq_CompressionType_DEFAULT
+ }
+ return p.CompressionType
+}
+
+var TCreateTabletReq_ReplicaId_DEFAULT types.TReplicaId = 0
+
+func (p *TCreateTabletReq) GetReplicaId() (v types.TReplicaId) {
+ if !p.IsSetReplicaId() {
+ return TCreateTabletReq_ReplicaId_DEFAULT
+ }
+ return p.ReplicaId
+}
+
+var TCreateTabletReq_EnableUniqueKeyMergeOnWrite_DEFAULT bool = false
+
+func (p *TCreateTabletReq) GetEnableUniqueKeyMergeOnWrite() (v bool) {
+ if !p.IsSetEnableUniqueKeyMergeOnWrite() {
+ return TCreateTabletReq_EnableUniqueKeyMergeOnWrite_DEFAULT
+ }
+ return p.EnableUniqueKeyMergeOnWrite
+}
+
+var TCreateTabletReq_StoragePolicyId_DEFAULT int64
+
+func (p *TCreateTabletReq) GetStoragePolicyId() (v int64) {
+ if !p.IsSetStoragePolicyId() {
+ return TCreateTabletReq_StoragePolicyId_DEFAULT
+ }
+ return *p.StoragePolicyId
+}
+
+var TCreateTabletReq_BinlogConfig_DEFAULT *TBinlogConfig
+
+func (p *TCreateTabletReq) GetBinlogConfig() (v *TBinlogConfig) {
+ if !p.IsSetBinlogConfig() {
+ return TCreateTabletReq_BinlogConfig_DEFAULT
+ }
+ return p.BinlogConfig
+}
+
+var TCreateTabletReq_CompactionPolicy_DEFAULT string = "size_based"
+
+func (p *TCreateTabletReq) GetCompactionPolicy() (v string) {
+ if !p.IsSetCompactionPolicy() {
+ return TCreateTabletReq_CompactionPolicy_DEFAULT
+ }
+ return p.CompactionPolicy
+}
+
+var TCreateTabletReq_TimeSeriesCompactionGoalSizeMbytes_DEFAULT int64 = 1024
+
+func (p *TCreateTabletReq) GetTimeSeriesCompactionGoalSizeMbytes() (v int64) {
+ if !p.IsSetTimeSeriesCompactionGoalSizeMbytes() {
+ return TCreateTabletReq_TimeSeriesCompactionGoalSizeMbytes_DEFAULT
+ }
+ return p.TimeSeriesCompactionGoalSizeMbytes
+}
+
+var TCreateTabletReq_TimeSeriesCompactionFileCountThreshold_DEFAULT int64 = 2000
+
+func (p *TCreateTabletReq) GetTimeSeriesCompactionFileCountThreshold() (v int64) {
+ if !p.IsSetTimeSeriesCompactionFileCountThreshold() {
+ return TCreateTabletReq_TimeSeriesCompactionFileCountThreshold_DEFAULT
+ }
+ return p.TimeSeriesCompactionFileCountThreshold
+}
+
+var TCreateTabletReq_TimeSeriesCompactionTimeThresholdSeconds_DEFAULT int64 = 3600
+
+func (p *TCreateTabletReq) GetTimeSeriesCompactionTimeThresholdSeconds() (v int64) {
+ if !p.IsSetTimeSeriesCompactionTimeThresholdSeconds() {
+ return TCreateTabletReq_TimeSeriesCompactionTimeThresholdSeconds_DEFAULT
+ }
+ return p.TimeSeriesCompactionTimeThresholdSeconds
+}
+
+var TCreateTabletReq_TimeSeriesCompactionEmptyRowsetsThreshold_DEFAULT int64 = 5
+
+func (p *TCreateTabletReq) GetTimeSeriesCompactionEmptyRowsetsThreshold() (v int64) {
+ if !p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() {
+ return TCreateTabletReq_TimeSeriesCompactionEmptyRowsetsThreshold_DEFAULT
+ }
+ return p.TimeSeriesCompactionEmptyRowsetsThreshold
+}
+
+var TCreateTabletReq_TimeSeriesCompactionLevelThreshold_DEFAULT int64 = 1
+
+func (p *TCreateTabletReq) GetTimeSeriesCompactionLevelThreshold() (v int64) {
+ if !p.IsSetTimeSeriesCompactionLevelThreshold() {
+ return TCreateTabletReq_TimeSeriesCompactionLevelThreshold_DEFAULT
+ }
+ return p.TimeSeriesCompactionLevelThreshold
+}
+
+var TCreateTabletReq_InvertedIndexStorageFormat_DEFAULT TInvertedIndexStorageFormat = TInvertedIndexStorageFormat_DEFAULT
+
+func (p *TCreateTabletReq) GetInvertedIndexStorageFormat() (v TInvertedIndexStorageFormat) {
+ if !p.IsSetInvertedIndexStorageFormat() {
+ return TCreateTabletReq_InvertedIndexStorageFormat_DEFAULT
+ }
+ return p.InvertedIndexStorageFormat
+}
+
+var TCreateTabletReq_InvertedIndexFileStorageFormat_DEFAULT types.TInvertedIndexFileStorageFormat = types.TInvertedIndexFileStorageFormat_V2
+
+func (p *TCreateTabletReq) GetInvertedIndexFileStorageFormat() (v types.TInvertedIndexFileStorageFormat) {
+ if !p.IsSetInvertedIndexFileStorageFormat() {
+ return TCreateTabletReq_InvertedIndexFileStorageFormat_DEFAULT
+ }
+ return p.InvertedIndexFileStorageFormat
+}
+
+var TCreateTabletReq_IsInMemory_DEFAULT bool = false
+
+func (p *TCreateTabletReq) GetIsInMemory() (v bool) {
+ if !p.IsSetIsInMemory() {
+ return TCreateTabletReq_IsInMemory_DEFAULT
+ }
+ return p.IsInMemory
+}
+
+var TCreateTabletReq_IsPersistent_DEFAULT bool = false
+
+func (p *TCreateTabletReq) GetIsPersistent() (v bool) {
+ if !p.IsSetIsPersistent() {
+ return TCreateTabletReq_IsPersistent_DEFAULT
+ }
+ return p.IsPersistent
+}
+func (p *TCreateTabletReq) SetTabletId(val types.TTabletId) {
+ p.TabletId = val
+}
+func (p *TCreateTabletReq) SetTabletSchema(val *TTabletSchema) {
+ p.TabletSchema = val
+}
+func (p *TCreateTabletReq) SetVersion(val *types.TVersion) {
+ p.Version = val
+}
+func (p *TCreateTabletReq) SetVersionHash(val *types.TVersionHash) {
+ p.VersionHash = val
+}
+func (p *TCreateTabletReq) SetStorageMedium(val *types.TStorageMedium) {
+ p.StorageMedium = val
+}
+func (p *TCreateTabletReq) SetInRestoreMode(val *bool) {
+ p.InRestoreMode = val
+}
+func (p *TCreateTabletReq) SetBaseTabletId(val *types.TTabletId) {
+ p.BaseTabletId = val
+}
+func (p *TCreateTabletReq) SetBaseSchemaHash(val *types.TSchemaHash) {
+ p.BaseSchemaHash = val
+}
+func (p *TCreateTabletReq) SetTableId(val *int64) {
+ p.TableId = val
+}
+func (p *TCreateTabletReq) SetPartitionId(val *int64) {
+ p.PartitionId = val
+}
+func (p *TCreateTabletReq) SetAllocationTerm(val *int64) {
+ p.AllocationTerm = val
+}
+func (p *TCreateTabletReq) SetIsEcoMode(val *bool) {
+ p.IsEcoMode = val
}
func (p *TCreateTabletReq) SetStorageFormat(val *TStorageFormat) {
p.StorageFormat = val
@@ -4710,31 +5788,55 @@ func (p *TCreateTabletReq) SetTimeSeriesCompactionFileCountThreshold(val int64)
func (p *TCreateTabletReq) SetTimeSeriesCompactionTimeThresholdSeconds(val int64) {
p.TimeSeriesCompactionTimeThresholdSeconds = val
}
+func (p *TCreateTabletReq) SetTimeSeriesCompactionEmptyRowsetsThreshold(val int64) {
+ p.TimeSeriesCompactionEmptyRowsetsThreshold = val
+}
+func (p *TCreateTabletReq) SetTimeSeriesCompactionLevelThreshold(val int64) {
+ p.TimeSeriesCompactionLevelThreshold = val
+}
+func (p *TCreateTabletReq) SetInvertedIndexStorageFormat(val TInvertedIndexStorageFormat) {
+ p.InvertedIndexStorageFormat = val
+}
+func (p *TCreateTabletReq) SetInvertedIndexFileStorageFormat(val types.TInvertedIndexFileStorageFormat) {
+ p.InvertedIndexFileStorageFormat = val
+}
+func (p *TCreateTabletReq) SetIsInMemory(val bool) {
+ p.IsInMemory = val
+}
+func (p *TCreateTabletReq) SetIsPersistent(val bool) {
+ p.IsPersistent = val
+}
var fieldIDToName_TCreateTabletReq = map[int16]string{
- 1: "tablet_id",
- 2: "tablet_schema",
- 3: "version",
- 4: "version_hash",
- 5: "storage_medium",
- 6: "in_restore_mode",
- 7: "base_tablet_id",
- 8: "base_schema_hash",
- 9: "table_id",
- 10: "partition_id",
- 11: "allocation_term",
- 12: "is_eco_mode",
- 13: "storage_format",
- 14: "tablet_type",
- 16: "compression_type",
- 17: "replica_id",
- 19: "enable_unique_key_merge_on_write",
- 20: "storage_policy_id",
- 21: "binlog_config",
- 22: "compaction_policy",
- 23: "time_series_compaction_goal_size_mbytes",
- 24: "time_series_compaction_file_count_threshold",
- 25: "time_series_compaction_time_threshold_seconds",
+ 1: "tablet_id",
+ 2: "tablet_schema",
+ 3: "version",
+ 4: "version_hash",
+ 5: "storage_medium",
+ 6: "in_restore_mode",
+ 7: "base_tablet_id",
+ 8: "base_schema_hash",
+ 9: "table_id",
+ 10: "partition_id",
+ 11: "allocation_term",
+ 12: "is_eco_mode",
+ 13: "storage_format",
+ 14: "tablet_type",
+ 16: "compression_type",
+ 17: "replica_id",
+ 19: "enable_unique_key_merge_on_write",
+ 20: "storage_policy_id",
+ 21: "binlog_config",
+ 22: "compaction_policy",
+ 23: "time_series_compaction_goal_size_mbytes",
+ 24: "time_series_compaction_file_count_threshold",
+ 25: "time_series_compaction_time_threshold_seconds",
+ 26: "time_series_compaction_empty_rowsets_threshold",
+ 27: "time_series_compaction_level_threshold",
+ 28: "inverted_index_storage_format",
+ 29: "inverted_index_file_storage_format",
+ 1000: "is_in_memory",
+ 1001: "is_persistent",
}
func (p *TCreateTabletReq) IsSetTabletSchema() bool {
@@ -4825,6 +5927,30 @@ func (p *TCreateTabletReq) IsSetTimeSeriesCompactionTimeThresholdSeconds() bool
return p.TimeSeriesCompactionTimeThresholdSeconds != TCreateTabletReq_TimeSeriesCompactionTimeThresholdSeconds_DEFAULT
}
+func (p *TCreateTabletReq) IsSetTimeSeriesCompactionEmptyRowsetsThreshold() bool {
+ return p.TimeSeriesCompactionEmptyRowsetsThreshold != TCreateTabletReq_TimeSeriesCompactionEmptyRowsetsThreshold_DEFAULT
+}
+
+func (p *TCreateTabletReq) IsSetTimeSeriesCompactionLevelThreshold() bool {
+ return p.TimeSeriesCompactionLevelThreshold != TCreateTabletReq_TimeSeriesCompactionLevelThreshold_DEFAULT
+}
+
+func (p *TCreateTabletReq) IsSetInvertedIndexStorageFormat() bool {
+ return p.InvertedIndexStorageFormat != TCreateTabletReq_InvertedIndexStorageFormat_DEFAULT
+}
+
+func (p *TCreateTabletReq) IsSetInvertedIndexFileStorageFormat() bool {
+ return p.InvertedIndexFileStorageFormat != TCreateTabletReq_InvertedIndexFileStorageFormat_DEFAULT
+}
+
+func (p *TCreateTabletReq) IsSetIsInMemory() bool {
+ return p.IsInMemory != TCreateTabletReq_IsInMemory_DEFAULT
+}
+
+func (p *TCreateTabletReq) IsSetIsPersistent() bool {
+ return p.IsPersistent != TCreateTabletReq_IsPersistent_DEFAULT
+}
+
func (p *TCreateTabletReq) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -4852,10 +5978,8 @@ func (p *TCreateTabletReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRUCT {
@@ -4863,227 +5987,230 @@ func (p *TCreateTabletReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletSchema = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I64 {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I32 {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.I64 {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.I32 {
if err = p.ReadField8(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.I64 {
if err = p.ReadField9(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.I64 {
if err = p.ReadField10(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 11:
if fieldTypeId == thrift.I64 {
if err = p.ReadField11(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 12:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField12(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 13:
if fieldTypeId == thrift.I32 {
if err = p.ReadField13(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 14:
if fieldTypeId == thrift.I32 {
if err = p.ReadField14(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 16:
if fieldTypeId == thrift.I32 {
if err = p.ReadField16(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 17:
if fieldTypeId == thrift.I64 {
if err = p.ReadField17(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 19:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField19(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 20:
if fieldTypeId == thrift.I64 {
if err = p.ReadField20(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 21:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField21(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 22:
if fieldTypeId == thrift.STRING {
if err = p.ReadField22(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 23:
if fieldTypeId == thrift.I64 {
if err = p.ReadField23(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 24:
if fieldTypeId == thrift.I64 {
if err = p.ReadField24(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 25:
if fieldTypeId == thrift.I64 {
if err = p.ReadField25(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 26:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField26(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 27:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField27(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 28:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField28(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 29:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField29(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 1000:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField1000(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 1001:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField1001(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -5120,210 +6247,319 @@ RequiredFieldNotSetError:
}
func (p *TCreateTabletReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = v
+ _field = v
}
+ p.TabletId = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField2(iprot thrift.TProtocol) error {
- p.TabletSchema = NewTTabletSchema()
- if err := p.TabletSchema.Read(iprot); err != nil {
+ _field := NewTTabletSchema()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.TabletSchema = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersion
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.Version = &v
+ _field = &v
}
+ p.Version = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersionHash
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.VersionHash = &v
+ _field = &v
}
+ p.VersionHash = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *types.TStorageMedium
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := types.TStorageMedium(v)
- p.StorageMedium = &tmp
+ _field = &tmp
}
+ p.StorageMedium = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.InRestoreMode = &v
+ _field = &v
}
+ p.InRestoreMode = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field *types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.BaseTabletId = &v
+ _field = &v
}
+ p.BaseTabletId = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField8(iprot thrift.TProtocol) error {
+
+ var _field *types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.BaseSchemaHash = &v
+ _field = &v
}
+ p.BaseSchemaHash = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField9(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TableId = &v
+ _field = &v
}
+ p.TableId = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField10(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.PartitionId = &v
+ _field = &v
}
+ p.PartitionId = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField11(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.AllocationTerm = &v
+ _field = &v
}
+ p.AllocationTerm = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField12(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.IsEcoMode = &v
+ _field = &v
}
+ p.IsEcoMode = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField13(iprot thrift.TProtocol) error {
+
+ var _field *TStorageFormat
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := TStorageFormat(v)
- p.StorageFormat = &tmp
+ _field = &tmp
}
+ p.StorageFormat = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField14(iprot thrift.TProtocol) error {
+
+ var _field *TTabletType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := TTabletType(v)
- p.TabletType = &tmp
+ _field = &tmp
}
+ p.TabletType = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField16(iprot thrift.TProtocol) error {
+
+ var _field TCompressionType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.CompressionType = TCompressionType(v)
+ _field = TCompressionType(v)
}
+ p.CompressionType = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField17(iprot thrift.TProtocol) error {
+
+ var _field types.TReplicaId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.ReplicaId = v
+ _field = v
}
+ p.ReplicaId = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField19(iprot thrift.TProtocol) error {
+
+ var _field bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.EnableUniqueKeyMergeOnWrite = v
+ _field = v
}
+ p.EnableUniqueKeyMergeOnWrite = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField20(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.StoragePolicyId = &v
+ _field = &v
}
+ p.StoragePolicyId = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField21(iprot thrift.TProtocol) error {
- p.BinlogConfig = NewTBinlogConfig()
- if err := p.BinlogConfig.Read(iprot); err != nil {
+ _field := NewTBinlogConfig()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.BinlogConfig = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField22(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.CompactionPolicy = v
+ _field = v
}
+ p.CompactionPolicy = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField23(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TimeSeriesCompactionGoalSizeMbytes = v
+ _field = v
}
+ p.TimeSeriesCompactionGoalSizeMbytes = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField24(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TimeSeriesCompactionFileCountThreshold = v
+ _field = v
}
+ p.TimeSeriesCompactionFileCountThreshold = _field
return nil
}
-
func (p *TCreateTabletReq) ReadField25(iprot thrift.TProtocol) error {
+
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.TimeSeriesCompactionTimeThresholdSeconds = _field
+ return nil
+}
+func (p *TCreateTabletReq) ReadField26(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TimeSeriesCompactionTimeThresholdSeconds = v
+ _field = v
}
+ p.TimeSeriesCompactionEmptyRowsetsThreshold = _field
+ return nil
+}
+func (p *TCreateTabletReq) ReadField27(iprot thrift.TProtocol) error {
+
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.TimeSeriesCompactionLevelThreshold = _field
+ return nil
+}
+func (p *TCreateTabletReq) ReadField28(iprot thrift.TProtocol) error {
+
+ var _field TInvertedIndexStorageFormat
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = TInvertedIndexStorageFormat(v)
+ }
+ p.InvertedIndexStorageFormat = _field
+ return nil
+}
+func (p *TCreateTabletReq) ReadField29(iprot thrift.TProtocol) error {
+
+ var _field types.TInvertedIndexFileStorageFormat
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = types.TInvertedIndexFileStorageFormat(v)
+ }
+ p.InvertedIndexFileStorageFormat = _field
+ return nil
+}
+func (p *TCreateTabletReq) ReadField1000(iprot thrift.TProtocol) error {
+
+ var _field bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.IsInMemory = _field
+ return nil
+}
+func (p *TCreateTabletReq) ReadField1001(iprot thrift.TProtocol) error {
+
+ var _field bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.IsPersistent = _field
return nil
}
@@ -5425,7 +6661,30 @@ func (p *TCreateTabletReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 25
goto WriteFieldError
}
-
+ if err = p.writeField26(oprot); err != nil {
+ fieldId = 26
+ goto WriteFieldError
+ }
+ if err = p.writeField27(oprot); err != nil {
+ fieldId = 27
+ goto WriteFieldError
+ }
+ if err = p.writeField28(oprot); err != nil {
+ fieldId = 28
+ goto WriteFieldError
+ }
+ if err = p.writeField29(oprot); err != nil {
+ fieldId = 29
+ goto WriteFieldError
+ }
+ if err = p.writeField1000(oprot); err != nil {
+ fieldId = 1000
+ goto WriteFieldError
+ }
+ if err = p.writeField1001(oprot); err != nil {
+ fieldId = 1001
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -5877,39 +7136,154 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 25 end error: ", p), err)
}
-func (p *TCreateTabletReq) String() string {
- if p == nil {
- return ""
+func (p *TCreateTabletReq) writeField26(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() {
+ if err = oprot.WriteFieldBegin("time_series_compaction_empty_rowsets_threshold", thrift.I64, 26); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.TimeSeriesCompactionEmptyRowsetsThreshold); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
}
- return fmt.Sprintf("TCreateTabletReq(%+v)", *p)
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 26 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 26 end error: ", p), err)
}
-func (p *TCreateTabletReq) DeepEqual(ano *TCreateTabletReq) bool {
- if p == ano {
- return true
- } else if p == nil || ano == nil {
- return false
- }
- if !p.Field1DeepEqual(ano.TabletId) {
- return false
- }
- if !p.Field2DeepEqual(ano.TabletSchema) {
- return false
- }
- if !p.Field3DeepEqual(ano.Version) {
- return false
- }
- if !p.Field4DeepEqual(ano.VersionHash) {
- return false
+func (p *TCreateTabletReq) writeField27(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTimeSeriesCompactionLevelThreshold() {
+ if err = oprot.WriteFieldBegin("time_series_compaction_level_threshold", thrift.I64, 27); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.TimeSeriesCompactionLevelThreshold); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
}
- if !p.Field5DeepEqual(ano.StorageMedium) {
- return false
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 27 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 27 end error: ", p), err)
+}
+
+func (p *TCreateTabletReq) writeField28(oprot thrift.TProtocol) (err error) {
+ if p.IsSetInvertedIndexStorageFormat() {
+ if err = oprot.WriteFieldBegin("inverted_index_storage_format", thrift.I32, 28); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(p.InvertedIndexStorageFormat)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
}
- if !p.Field6DeepEqual(ano.InRestoreMode) {
- return false
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 28 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 28 end error: ", p), err)
+}
+
+func (p *TCreateTabletReq) writeField29(oprot thrift.TProtocol) (err error) {
+ if p.IsSetInvertedIndexFileStorageFormat() {
+ if err = oprot.WriteFieldBegin("inverted_index_file_storage_format", thrift.I32, 29); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(p.InvertedIndexFileStorageFormat)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
}
- if !p.Field7DeepEqual(ano.BaseTabletId) {
- return false
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 29 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 29 end error: ", p), err)
+}
+
+func (p *TCreateTabletReq) writeField1000(oprot thrift.TProtocol) (err error) {
+ if p.IsSetIsInMemory() {
+ if err = oprot.WriteFieldBegin("is_in_memory", thrift.BOOL, 1000); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(p.IsInMemory); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err)
+}
+
+func (p *TCreateTabletReq) writeField1001(oprot thrift.TProtocol) (err error) {
+ if p.IsSetIsPersistent() {
+ if err = oprot.WriteFieldBegin("is_persistent", thrift.BOOL, 1001); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(p.IsPersistent); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1001 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1001 end error: ", p), err)
+}
+
+func (p *TCreateTabletReq) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TCreateTabletReq(%+v)", *p)
+
+}
+
+func (p *TCreateTabletReq) DeepEqual(ano *TCreateTabletReq) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.TabletId) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.TabletSchema) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.Version) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.VersionHash) {
+ return false
+ }
+ if !p.Field5DeepEqual(ano.StorageMedium) {
+ return false
+ }
+ if !p.Field6DeepEqual(ano.InRestoreMode) {
+ return false
+ }
+ if !p.Field7DeepEqual(ano.BaseTabletId) {
+ return false
}
if !p.Field8DeepEqual(ano.BaseSchemaHash) {
return false
@@ -5959,6 +7333,24 @@ func (p *TCreateTabletReq) DeepEqual(ano *TCreateTabletReq) bool {
if !p.Field25DeepEqual(ano.TimeSeriesCompactionTimeThresholdSeconds) {
return false
}
+ if !p.Field26DeepEqual(ano.TimeSeriesCompactionEmptyRowsetsThreshold) {
+ return false
+ }
+ if !p.Field27DeepEqual(ano.TimeSeriesCompactionLevelThreshold) {
+ return false
+ }
+ if !p.Field28DeepEqual(ano.InvertedIndexStorageFormat) {
+ return false
+ }
+ if !p.Field29DeepEqual(ano.InvertedIndexFileStorageFormat) {
+ return false
+ }
+ if !p.Field1000DeepEqual(ano.IsInMemory) {
+ return false
+ }
+ if !p.Field1001DeepEqual(ano.IsPersistent) {
+ return false
+ }
return true
}
@@ -6188,6 +7580,48 @@ func (p *TCreateTabletReq) Field25DeepEqual(src int64) bool {
}
return true
}
+func (p *TCreateTabletReq) Field26DeepEqual(src int64) bool {
+
+ if p.TimeSeriesCompactionEmptyRowsetsThreshold != src {
+ return false
+ }
+ return true
+}
+func (p *TCreateTabletReq) Field27DeepEqual(src int64) bool {
+
+ if p.TimeSeriesCompactionLevelThreshold != src {
+ return false
+ }
+ return true
+}
+func (p *TCreateTabletReq) Field28DeepEqual(src TInvertedIndexStorageFormat) bool {
+
+ if p.InvertedIndexStorageFormat != src {
+ return false
+ }
+ return true
+}
+func (p *TCreateTabletReq) Field29DeepEqual(src types.TInvertedIndexFileStorageFormat) bool {
+
+ if p.InvertedIndexFileStorageFormat != src {
+ return false
+ }
+ return true
+}
+func (p *TCreateTabletReq) Field1000DeepEqual(src bool) bool {
+
+ if p.IsInMemory != src {
+ return false
+ }
+ return true
+}
+func (p *TCreateTabletReq) Field1001DeepEqual(src bool) bool {
+
+ if p.IsPersistent != src {
+ return false
+ }
+ return true
+}
type TDropTabletReq struct {
TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"`
@@ -6205,11 +7639,8 @@ func NewTDropTabletReq() *TDropTabletReq {
}
func (p *TDropTabletReq) InitDefault() {
- *p = TDropTabletReq{
-
- ReplicaId: 0,
- IsDropTableOrPartition: false,
- }
+ p.ReplicaId = 0
+ p.IsDropTableOrPartition = false
}
func (p *TDropTabletReq) GetTabletId() (v types.TTabletId) {
@@ -6300,47 +7731,38 @@ func (p *TDropTabletReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -6372,38 +7794,47 @@ RequiredFieldNotSetError:
}
func (p *TDropTabletReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = v
+ _field = v
}
+ p.TabletId = _field
return nil
}
-
func (p *TDropTabletReq) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SchemaHash = &v
+ _field = &v
}
+ p.SchemaHash = _field
return nil
}
-
func (p *TDropTabletReq) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field types.TReplicaId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.ReplicaId = v
+ _field = v
}
+ p.ReplicaId = _field
return nil
}
-
func (p *TDropTabletReq) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.IsDropTableOrPartition = v
+ _field = v
}
+ p.IsDropTableOrPartition = _field
return nil
}
@@ -6429,7 +7860,6 @@ func (p *TDropTabletReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -6527,6 +7957,7 @@ func (p *TDropTabletReq) String() string {
return ""
}
return fmt.Sprintf("TDropTabletReq(%+v)", *p)
+
}
func (p *TDropTabletReq) DeepEqual(ano *TDropTabletReq) bool {
@@ -6595,7 +8026,6 @@ func NewTAlterTabletReq() *TAlterTabletReq {
}
func (p *TAlterTabletReq) InitDefault() {
- *p = TAlterTabletReq{}
}
func (p *TAlterTabletReq) GetBaseTabletId() (v types.TTabletId) {
@@ -6662,10 +8092,8 @@ func (p *TAlterTabletReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetBaseTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -6673,10 +8101,8 @@ func (p *TAlterTabletReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetBaseSchemaHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRUCT {
@@ -6684,17 +8110,14 @@ func (p *TAlterTabletReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetNewTabletReq_ = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -6736,28 +8159,33 @@ RequiredFieldNotSetError:
}
func (p *TAlterTabletReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.BaseTabletId = v
+ _field = v
}
+ p.BaseTabletId = _field
return nil
}
-
func (p *TAlterTabletReq) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.BaseSchemaHash = v
+ _field = v
}
+ p.BaseSchemaHash = _field
return nil
}
-
func (p *TAlterTabletReq) ReadField3(iprot thrift.TProtocol) error {
- p.NewTabletReq_ = NewTCreateTabletReq()
- if err := p.NewTabletReq_.Read(iprot); err != nil {
+ _field := NewTCreateTabletReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.NewTabletReq_ = _field
return nil
}
@@ -6779,7 +8207,6 @@ func (p *TAlterTabletReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 3
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -6854,6 +8281,7 @@ func (p *TAlterTabletReq) String() string {
return ""
}
return fmt.Sprintf("TAlterTabletReq(%+v)", *p)
+
}
func (p *TAlterTabletReq) DeepEqual(ano *TAlterTabletReq) bool {
@@ -6907,7 +8335,6 @@ func NewTAlterMaterializedViewParam() *TAlterMaterializedViewParam {
}
func (p *TAlterMaterializedViewParam) InitDefault() {
- *p = TAlterMaterializedViewParam{}
}
func (p *TAlterMaterializedViewParam) GetColumnName() (v string) {
@@ -6981,37 +8408,30 @@ func (p *TAlterMaterializedViewParam) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetColumnName = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRING {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -7043,28 +8463,33 @@ RequiredFieldNotSetError:
}
func (p *TAlterMaterializedViewParam) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.ColumnName = v
+ _field = v
}
+ p.ColumnName = _field
return nil
}
-
func (p *TAlterMaterializedViewParam) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.OriginColumnName = &v
+ _field = &v
}
+ p.OriginColumnName = _field
return nil
}
-
func (p *TAlterMaterializedViewParam) ReadField3(iprot thrift.TProtocol) error {
- p.MvExpr = exprs.NewTExpr()
- if err := p.MvExpr.Read(iprot); err != nil {
+ _field := exprs.NewTExpr()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.MvExpr = _field
return nil
}
@@ -7086,7 +8511,6 @@ func (p *TAlterMaterializedViewParam) Write(oprot thrift.TProtocol) (err error)
fieldId = 3
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -7165,6 +8589,7 @@ func (p *TAlterMaterializedViewParam) String() string {
return ""
}
return fmt.Sprintf("TAlterMaterializedViewParam(%+v)", *p)
+
}
func (p *TAlterMaterializedViewParam) DeepEqual(ano *TAlterMaterializedViewParam) bool {
@@ -7224,6 +8649,9 @@ type TAlterTabletReqV2 struct {
DescTbl *descriptors.TDescriptorTable `thrift:"desc_tbl,9,optional" frugal:"9,optional,descriptors.TDescriptorTable" json:"desc_tbl,omitempty"`
Columns []*descriptors.TColumn `thrift:"columns,10,optional" frugal:"10,optional,list" json:"columns,omitempty"`
BeExecVersion int32 `thrift:"be_exec_version,11,optional" frugal:"11,optional,i32" json:"be_exec_version,omitempty"`
+ JobId *int64 `thrift:"job_id,1000,optional" frugal:"1000,optional,i64" json:"job_id,omitempty"`
+ Expiration *int64 `thrift:"expiration,1001,optional" frugal:"1001,optional,i64" json:"expiration,omitempty"`
+ StorageVaultId *string `thrift:"storage_vault_id,1002,optional" frugal:"1002,optional,string" json:"storage_vault_id,omitempty"`
}
func NewTAlterTabletReqV2() *TAlterTabletReqV2 {
@@ -7235,11 +8663,8 @@ func NewTAlterTabletReqV2() *TAlterTabletReqV2 {
}
func (p *TAlterTabletReqV2) InitDefault() {
- *p = TAlterTabletReqV2{
-
- AlterTabletType: TAlterTabletType_SCHEMA_CHANGE,
- BeExecVersion: 0,
- }
+ p.AlterTabletType = TAlterTabletType_SCHEMA_CHANGE
+ p.BeExecVersion = 0
}
func (p *TAlterTabletReqV2) GetBaseTabletId() (v types.TTabletId) {
@@ -7320,6 +8745,33 @@ func (p *TAlterTabletReqV2) GetBeExecVersion() (v int32) {
}
return p.BeExecVersion
}
+
+var TAlterTabletReqV2_JobId_DEFAULT int64
+
+func (p *TAlterTabletReqV2) GetJobId() (v int64) {
+ if !p.IsSetJobId() {
+ return TAlterTabletReqV2_JobId_DEFAULT
+ }
+ return *p.JobId
+}
+
+var TAlterTabletReqV2_Expiration_DEFAULT int64
+
+func (p *TAlterTabletReqV2) GetExpiration() (v int64) {
+ if !p.IsSetExpiration() {
+ return TAlterTabletReqV2_Expiration_DEFAULT
+ }
+ return *p.Expiration
+}
+
+var TAlterTabletReqV2_StorageVaultId_DEFAULT string
+
+func (p *TAlterTabletReqV2) GetStorageVaultId() (v string) {
+ if !p.IsSetStorageVaultId() {
+ return TAlterTabletReqV2_StorageVaultId_DEFAULT
+ }
+ return *p.StorageVaultId
+}
func (p *TAlterTabletReqV2) SetBaseTabletId(val types.TTabletId) {
p.BaseTabletId = val
}
@@ -7353,19 +8805,31 @@ func (p *TAlterTabletReqV2) SetColumns(val []*descriptors.TColumn) {
func (p *TAlterTabletReqV2) SetBeExecVersion(val int32) {
p.BeExecVersion = val
}
+func (p *TAlterTabletReqV2) SetJobId(val *int64) {
+ p.JobId = val
+}
+func (p *TAlterTabletReqV2) SetExpiration(val *int64) {
+ p.Expiration = val
+}
+func (p *TAlterTabletReqV2) SetStorageVaultId(val *string) {
+ p.StorageVaultId = val
+}
var fieldIDToName_TAlterTabletReqV2 = map[int16]string{
- 1: "base_tablet_id",
- 2: "new_tablet_id",
- 3: "base_schema_hash",
- 4: "new_schema_hash",
- 5: "alter_version",
- 6: "alter_version_hash",
- 7: "materialized_view_params",
- 8: "alter_tablet_type",
- 9: "desc_tbl",
- 10: "columns",
- 11: "be_exec_version",
+ 1: "base_tablet_id",
+ 2: "new_tablet_id",
+ 3: "base_schema_hash",
+ 4: "new_schema_hash",
+ 5: "alter_version",
+ 6: "alter_version_hash",
+ 7: "materialized_view_params",
+ 8: "alter_tablet_type",
+ 9: "desc_tbl",
+ 10: "columns",
+ 11: "be_exec_version",
+ 1000: "job_id",
+ 1001: "expiration",
+ 1002: "storage_vault_id",
}
func (p *TAlterTabletReqV2) IsSetAlterVersion() bool {
@@ -7396,6 +8860,18 @@ func (p *TAlterTabletReqV2) IsSetBeExecVersion() bool {
return p.BeExecVersion != TAlterTabletReqV2_BeExecVersion_DEFAULT
}
+func (p *TAlterTabletReqV2) IsSetJobId() bool {
+ return p.JobId != nil
+}
+
+func (p *TAlterTabletReqV2) IsSetExpiration() bool {
+ return p.Expiration != nil
+}
+
+func (p *TAlterTabletReqV2) IsSetStorageVaultId() bool {
+ return p.StorageVaultId != nil
+}
+
func (p *TAlterTabletReqV2) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -7425,10 +8901,8 @@ func (p *TAlterTabletReqV2) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetBaseTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I64 {
@@ -7436,10 +8910,8 @@ func (p *TAlterTabletReqV2) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetNewTabletId_ = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I32 {
@@ -7447,10 +8919,8 @@ func (p *TAlterTabletReqV2) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetBaseSchemaHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I32 {
@@ -7458,87 +8928,94 @@ func (p *TAlterTabletReqV2) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetNewSchemaHash_ = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I64 {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.I64 {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.LIST {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.I32 {
if err = p.ReadField8(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField9(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.LIST {
if err = p.ReadField10(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 11:
if fieldTypeId == thrift.I32 {
if err = p.ReadField11(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 1000:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField1000(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 1001:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField1001(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 1002:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField1002(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -7585,134 +9062,190 @@ RequiredFieldNotSetError:
}
func (p *TAlterTabletReqV2) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.BaseTabletId = v
+ _field = v
}
+ p.BaseTabletId = _field
return nil
}
-
func (p *TAlterTabletReqV2) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.NewTabletId_ = v
+ _field = v
}
+ p.NewTabletId_ = _field
return nil
}
-
func (p *TAlterTabletReqV2) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.BaseSchemaHash = v
+ _field = v
}
+ p.BaseSchemaHash = _field
return nil
}
-
func (p *TAlterTabletReqV2) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.NewSchemaHash_ = v
+ _field = v
}
+ p.NewSchemaHash_ = _field
return nil
}
-
func (p *TAlterTabletReqV2) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersion
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.AlterVersion = &v
+ _field = &v
}
+ p.AlterVersion = _field
return nil
}
-
func (p *TAlterTabletReqV2) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersionHash
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.AlterVersionHash = &v
+ _field = &v
}
+ p.AlterVersionHash = _field
return nil
}
-
func (p *TAlterTabletReqV2) ReadField7(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.MaterializedViewParams = make([]*TAlterMaterializedViewParam, 0, size)
+ _field := make([]*TAlterMaterializedViewParam, 0, size)
+ values := make([]TAlterMaterializedViewParam, size)
for i := 0; i < size; i++ {
- _elem := NewTAlterMaterializedViewParam()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.MaterializedViewParams = append(p.MaterializedViewParams, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.MaterializedViewParams = _field
return nil
}
-
func (p *TAlterTabletReqV2) ReadField8(iprot thrift.TProtocol) error {
+
+ var _field TAlterTabletType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.AlterTabletType = TAlterTabletType(v)
+ _field = TAlterTabletType(v)
}
+ p.AlterTabletType = _field
return nil
}
-
func (p *TAlterTabletReqV2) ReadField9(iprot thrift.TProtocol) error {
- p.DescTbl = descriptors.NewTDescriptorTable()
- if err := p.DescTbl.Read(iprot); err != nil {
+ _field := descriptors.NewTDescriptorTable()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.DescTbl = _field
return nil
}
-
func (p *TAlterTabletReqV2) ReadField10(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.Columns = make([]*descriptors.TColumn, 0, size)
+ _field := make([]*descriptors.TColumn, 0, size)
+ values := make([]descriptors.TColumn, size)
for i := 0; i < size; i++ {
- _elem := descriptors.NewTColumn()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.Columns = append(p.Columns, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Columns = _field
return nil
}
-
func (p *TAlterTabletReqV2) ReadField11(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.BeExecVersion = v
+ _field = v
}
+ p.BeExecVersion = _field
return nil
}
+func (p *TAlterTabletReqV2) ReadField1000(iprot thrift.TProtocol) error {
-func (p *TAlterTabletReqV2) Write(oprot thrift.TProtocol) (err error) {
- var fieldId int16
- if err = oprot.WriteStructBegin("TAlterTabletReqV2"); err != nil {
- goto WriteStructBeginError
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
}
- if p != nil {
- if err = p.writeField1(oprot); err != nil {
- fieldId = 1
- goto WriteFieldError
+ p.JobId = _field
+ return nil
+}
+func (p *TAlterTabletReqV2) ReadField1001(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Expiration = _field
+ return nil
+}
+func (p *TAlterTabletReqV2) ReadField1002(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.StorageVaultId = _field
+ return nil
+}
+
+func (p *TAlterTabletReqV2) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TAlterTabletReqV2"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
}
if err = p.writeField2(oprot); err != nil {
fieldId = 2
@@ -7754,7 +9287,18 @@ func (p *TAlterTabletReqV2) Write(oprot thrift.TProtocol) (err error) {
fieldId = 11
goto WriteFieldError
}
-
+ if err = p.writeField1000(oprot); err != nil {
+ fieldId = 1000
+ goto WriteFieldError
+ }
+ if err = p.writeField1001(oprot); err != nil {
+ fieldId = 1001
+ goto WriteFieldError
+ }
+ if err = p.writeField1002(oprot); err != nil {
+ fieldId = 1002
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -7990,11 +9534,69 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err)
}
+func (p *TAlterTabletReqV2) writeField1000(oprot thrift.TProtocol) (err error) {
+ if p.IsSetJobId() {
+ if err = oprot.WriteFieldBegin("job_id", thrift.I64, 1000); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.JobId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err)
+}
+
+func (p *TAlterTabletReqV2) writeField1001(oprot thrift.TProtocol) (err error) {
+ if p.IsSetExpiration() {
+ if err = oprot.WriteFieldBegin("expiration", thrift.I64, 1001); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.Expiration); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1001 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1001 end error: ", p), err)
+}
+
+func (p *TAlterTabletReqV2) writeField1002(oprot thrift.TProtocol) (err error) {
+ if p.IsSetStorageVaultId() {
+ if err = oprot.WriteFieldBegin("storage_vault_id", thrift.STRING, 1002); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.StorageVaultId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1002 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1002 end error: ", p), err)
+}
+
func (p *TAlterTabletReqV2) String() string {
if p == nil {
return ""
}
return fmt.Sprintf("TAlterTabletReqV2(%+v)", *p)
+
}
func (p *TAlterTabletReqV2) DeepEqual(ano *TAlterTabletReqV2) bool {
@@ -8036,6 +9638,15 @@ func (p *TAlterTabletReqV2) DeepEqual(ano *TAlterTabletReqV2) bool {
if !p.Field11DeepEqual(ano.BeExecVersion) {
return false
}
+ if !p.Field1000DeepEqual(ano.JobId) {
+ return false
+ }
+ if !p.Field1001DeepEqual(ano.Expiration) {
+ return false
+ }
+ if !p.Field1002DeepEqual(ano.StorageVaultId) {
+ return false
+ }
return true
}
@@ -8138,6 +9749,42 @@ func (p *TAlterTabletReqV2) Field11DeepEqual(src int32) bool {
}
return true
}
+func (p *TAlterTabletReqV2) Field1000DeepEqual(src *int64) bool {
+
+ if p.JobId == src {
+ return true
+ } else if p.JobId == nil || src == nil {
+ return false
+ }
+ if *p.JobId != *src {
+ return false
+ }
+ return true
+}
+func (p *TAlterTabletReqV2) Field1001DeepEqual(src *int64) bool {
+
+ if p.Expiration == src {
+ return true
+ } else if p.Expiration == nil || src == nil {
+ return false
+ }
+ if *p.Expiration != *src {
+ return false
+ }
+ return true
+}
+func (p *TAlterTabletReqV2) Field1002DeepEqual(src *string) bool {
+
+ if p.StorageVaultId == src {
+ return true
+ } else if p.StorageVaultId == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.StorageVaultId, *src) != 0 {
+ return false
+ }
+ return true
+}
type TAlterInvertedIndexReq struct {
TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"`
@@ -8161,11 +9808,8 @@ func NewTAlterInvertedIndexReq() *TAlterInvertedIndexReq {
}
func (p *TAlterInvertedIndexReq) InitDefault() {
- *p = TAlterInvertedIndexReq{
-
- AlterTabletType: TAlterTabletType_SCHEMA_CHANGE,
- IsDropOp: false,
- }
+ p.AlterTabletType = TAlterTabletType_SCHEMA_CHANGE
+ p.IsDropOp = false
}
func (p *TAlterInvertedIndexReq) GetTabletId() (v types.TTabletId) {
@@ -8350,10 +9994,8 @@ func (p *TAlterInvertedIndexReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -8361,97 +10003,78 @@ func (p *TAlterInvertedIndexReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSchemaHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I32 {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.LIST {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.LIST {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.LIST {
if err = p.ReadField8(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.I64 {
if err = p.ReadField9(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.I64 {
if err = p.ReadField10(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -8488,125 +10111,149 @@ RequiredFieldNotSetError:
}
func (p *TAlterInvertedIndexReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = v
+ _field = v
}
+ p.TabletId = _field
return nil
}
-
func (p *TAlterInvertedIndexReq) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SchemaHash = v
+ _field = v
}
+ p.SchemaHash = _field
return nil
}
-
func (p *TAlterInvertedIndexReq) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersion
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.AlterVersion = &v
+ _field = &v
}
+ p.AlterVersion = _field
return nil
}
-
func (p *TAlterInvertedIndexReq) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field TAlterTabletType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.AlterTabletType = TAlterTabletType(v)
+ _field = TAlterTabletType(v)
}
+ p.AlterTabletType = _field
return nil
}
-
func (p *TAlterInvertedIndexReq) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.IsDropOp = v
+ _field = v
}
+ p.IsDropOp = _field
return nil
}
-
func (p *TAlterInvertedIndexReq) ReadField6(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.AlterInvertedIndexes = make([]*descriptors.TOlapTableIndex, 0, size)
+ _field := make([]*descriptors.TOlapTableIndex, 0, size)
+ values := make([]descriptors.TOlapTableIndex, size)
for i := 0; i < size; i++ {
- _elem := descriptors.NewTOlapTableIndex()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.AlterInvertedIndexes = append(p.AlterInvertedIndexes, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.AlterInvertedIndexes = _field
return nil
}
-
func (p *TAlterInvertedIndexReq) ReadField7(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.IndexesDesc = make([]*descriptors.TOlapTableIndex, 0, size)
+ _field := make([]*descriptors.TOlapTableIndex, 0, size)
+ values := make([]descriptors.TOlapTableIndex, size)
for i := 0; i < size; i++ {
- _elem := descriptors.NewTOlapTableIndex()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.IndexesDesc = append(p.IndexesDesc, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.IndexesDesc = _field
return nil
}
-
func (p *TAlterInvertedIndexReq) ReadField8(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.Columns = make([]*descriptors.TColumn, 0, size)
+ _field := make([]*descriptors.TColumn, 0, size)
+ values := make([]descriptors.TColumn, size)
for i := 0; i < size; i++ {
- _elem := descriptors.NewTColumn()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.Columns = append(p.Columns, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Columns = _field
return nil
}
-
func (p *TAlterInvertedIndexReq) ReadField9(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.JobId = &v
+ _field = &v
}
+ p.JobId = _field
return nil
}
-
func (p *TAlterInvertedIndexReq) ReadField10(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.Expiration = &v
+ _field = &v
}
+ p.Expiration = _field
return nil
}
@@ -8656,7 +10303,6 @@ func (p *TAlterInvertedIndexReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 10
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -8890,6 +10536,7 @@ func (p *TAlterInvertedIndexReq) String() string {
return ""
}
return fmt.Sprintf("TAlterInvertedIndexReq(%+v)", *p)
+
}
func (p *TAlterInvertedIndexReq) DeepEqual(ano *TAlterInvertedIndexReq) bool {
@@ -9045,7 +10692,6 @@ func NewTTabletGcBinlogInfo() *TTabletGcBinlogInfo {
}
func (p *TTabletGcBinlogInfo) InitDefault() {
- *p = TTabletGcBinlogInfo{}
}
var TTabletGcBinlogInfo_TabletId_DEFAULT types.TTabletId
@@ -9109,27 +10755,22 @@ func (p *TTabletGcBinlogInfo) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I64 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -9155,20 +10796,25 @@ ReadStructEndError:
}
func (p *TTabletGcBinlogInfo) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = &v
+ _field = &v
}
+ p.TabletId = _field
return nil
}
-
func (p *TTabletGcBinlogInfo) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.Version = &v
+ _field = &v
}
+ p.Version = _field
return nil
}
@@ -9186,7 +10832,6 @@ func (p *TTabletGcBinlogInfo) Write(oprot thrift.TProtocol) (err error) {
fieldId = 2
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -9248,6 +10893,7 @@ func (p *TTabletGcBinlogInfo) String() string {
return ""
}
return fmt.Sprintf("TTabletGcBinlogInfo(%+v)", *p)
+
}
func (p *TTabletGcBinlogInfo) DeepEqual(ano *TTabletGcBinlogInfo) bool {
@@ -9299,7 +10945,6 @@ func NewTGcBinlogReq() *TGcBinlogReq {
}
func (p *TGcBinlogReq) InitDefault() {
- *p = TGcBinlogReq{}
}
var TGcBinlogReq_TabletGcBinlogInfos_DEFAULT []*TTabletGcBinlogInfo
@@ -9346,17 +10991,14 @@ func (p *TGcBinlogReq) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -9386,18 +11028,22 @@ func (p *TGcBinlogReq) ReadField1(iprot thrift.TProtocol) error {
if err != nil {
return err
}
- p.TabletGcBinlogInfos = make([]*TTabletGcBinlogInfo, 0, size)
+ _field := make([]*TTabletGcBinlogInfo, 0, size)
+ values := make([]TTabletGcBinlogInfo, size)
for i := 0; i < size; i++ {
- _elem := NewTTabletGcBinlogInfo()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.TabletGcBinlogInfos = append(p.TabletGcBinlogInfos, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.TabletGcBinlogInfos = _field
return nil
}
@@ -9411,7 +11057,6 @@ func (p *TGcBinlogReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -9462,6 +11107,7 @@ func (p *TGcBinlogReq) String() string {
return ""
}
return fmt.Sprintf("TGcBinlogReq(%+v)", *p)
+
}
func (p *TGcBinlogReq) DeepEqual(ano *TGcBinlogReq) bool {
@@ -9503,7 +11149,6 @@ func NewTStorageMigrationReqV2() *TStorageMigrationReqV2 {
}
func (p *TStorageMigrationReqV2) InitDefault() {
- *p = TStorageMigrationReqV2{}
}
var TStorageMigrationReqV2_BaseTabletId_DEFAULT types.TTabletId
@@ -9618,57 +11263,46 @@ func (p *TStorageMigrationReqV2) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I64 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I32 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I32 {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I64 {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -9694,47 +11328,58 @@ ReadStructEndError:
}
func (p *TStorageMigrationReqV2) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.BaseTabletId = &v
+ _field = &v
}
+ p.BaseTabletId = _field
return nil
}
-
func (p *TStorageMigrationReqV2) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.NewTabletId_ = &v
+ _field = &v
}
+ p.NewTabletId_ = _field
return nil
}
-
func (p *TStorageMigrationReqV2) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.BaseSchemaHash = &v
+ _field = &v
}
+ p.BaseSchemaHash = _field
return nil
}
-
func (p *TStorageMigrationReqV2) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.NewSchemaHash_ = &v
+ _field = &v
}
+ p.NewSchemaHash_ = _field
return nil
}
-
func (p *TStorageMigrationReqV2) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersion
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.MigrationVersion = &v
+ _field = &v
}
+ p.MigrationVersion = _field
return nil
}
@@ -9764,7 +11409,6 @@ func (p *TStorageMigrationReqV2) Write(oprot thrift.TProtocol) (err error) {
fieldId = 5
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -9883,6 +11527,7 @@ func (p *TStorageMigrationReqV2) String() string {
return ""
}
return fmt.Sprintf("TStorageMigrationReqV2(%+v)", *p)
+
}
func (p *TStorageMigrationReqV2) DeepEqual(ano *TStorageMigrationReqV2) bool {
@@ -9980,7 +11625,6 @@ func NewTClusterInfo() *TClusterInfo {
}
func (p *TClusterInfo) InitDefault() {
- *p = TClusterInfo{}
}
func (p *TClusterInfo) GetUser() (v string) {
@@ -10029,10 +11673,8 @@ func (p *TClusterInfo) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetUser = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRING {
@@ -10040,17 +11682,14 @@ func (p *TClusterInfo) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetPassword = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -10087,20 +11726,25 @@ RequiredFieldNotSetError:
}
func (p *TClusterInfo) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.User = v
+ _field = v
}
+ p.User = _field
return nil
}
-
func (p *TClusterInfo) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Password = v
+ _field = v
}
+ p.Password = _field
return nil
}
@@ -10118,7 +11762,6 @@ func (p *TClusterInfo) Write(oprot thrift.TProtocol) (err error) {
fieldId = 2
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -10176,6 +11819,7 @@ func (p *TClusterInfo) String() string {
return ""
}
return fmt.Sprintf("TClusterInfo(%+v)", *p)
+
}
func (p *TClusterInfo) DeepEqual(ano *TClusterInfo) bool {
@@ -10225,6 +11869,8 @@ type TPushReq struct {
BrokerScanRange *plannodes.TBrokerScanRange `thrift:"broker_scan_range,14,optional" frugal:"14,optional,plannodes.TBrokerScanRange" json:"broker_scan_range,omitempty"`
DescTbl *descriptors.TDescriptorTable `thrift:"desc_tbl,15,optional" frugal:"15,optional,descriptors.TDescriptorTable" json:"desc_tbl,omitempty"`
ColumnsDesc []*descriptors.TColumn `thrift:"columns_desc,16,optional" frugal:"16,optional,list" json:"columns_desc,omitempty"`
+ StorageVaultId *string `thrift:"storage_vault_id,17,optional" frugal:"17,optional,string" json:"storage_vault_id,omitempty"`
+ SchemaVersion *int32 `thrift:"schema_version,18,optional" frugal:"18,optional,i32" json:"schema_version,omitempty"`
}
func NewTPushReq() *TPushReq {
@@ -10232,7 +11878,6 @@ func NewTPushReq() *TPushReq {
}
func (p *TPushReq) InitDefault() {
- *p = TPushReq{}
}
func (p *TPushReq) GetTabletId() (v types.TTabletId) {
@@ -10348,6 +11993,24 @@ func (p *TPushReq) GetColumnsDesc() (v []*descriptors.TColumn) {
}
return p.ColumnsDesc
}
+
+var TPushReq_StorageVaultId_DEFAULT string
+
+func (p *TPushReq) GetStorageVaultId() (v string) {
+ if !p.IsSetStorageVaultId() {
+ return TPushReq_StorageVaultId_DEFAULT
+ }
+ return *p.StorageVaultId
+}
+
+var TPushReq_SchemaVersion_DEFAULT int32
+
+func (p *TPushReq) GetSchemaVersion() (v int32) {
+ if !p.IsSetSchemaVersion() {
+ return TPushReq_SchemaVersion_DEFAULT
+ }
+ return *p.SchemaVersion
+}
func (p *TPushReq) SetTabletId(val types.TTabletId) {
p.TabletId = val
}
@@ -10396,6 +12059,12 @@ func (p *TPushReq) SetDescTbl(val *descriptors.TDescriptorTable) {
func (p *TPushReq) SetColumnsDesc(val []*descriptors.TColumn) {
p.ColumnsDesc = val
}
+func (p *TPushReq) SetStorageVaultId(val *string) {
+ p.StorageVaultId = val
+}
+func (p *TPushReq) SetSchemaVersion(val *int32) {
+ p.SchemaVersion = val
+}
var fieldIDToName_TPushReq = map[int16]string{
1: "tablet_id",
@@ -10414,6 +12083,8 @@ var fieldIDToName_TPushReq = map[int16]string{
14: "broker_scan_range",
15: "desc_tbl",
16: "columns_desc",
+ 17: "storage_vault_id",
+ 18: "schema_version",
}
func (p *TPushReq) IsSetHttpFilePath() bool {
@@ -10456,6 +12127,14 @@ func (p *TPushReq) IsSetColumnsDesc() bool {
return p.ColumnsDesc != nil
}
+func (p *TPushReq) IsSetStorageVaultId() bool {
+ return p.StorageVaultId != nil
+}
+
+func (p *TPushReq) IsSetSchemaVersion() bool {
+ return p.SchemaVersion != nil
+}
+
func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -10487,10 +12166,8 @@ func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -10498,10 +12175,8 @@ func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSchemaHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
@@ -10509,10 +12184,8 @@ func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetVersion = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I64 {
@@ -10520,10 +12193,8 @@ func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetVersionHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I64 {
@@ -10531,10 +12202,8 @@ func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTimeout = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.I32 {
@@ -10542,117 +12211,110 @@ func (p *TPushReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetPushType = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.STRING {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.I64 {
if err = p.ReadField8(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.LIST {
if err = p.ReadField9(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField10(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 11:
if fieldTypeId == thrift.I64 {
if err = p.ReadField11(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 12:
if fieldTypeId == thrift.I64 {
if err = p.ReadField12(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 13:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField13(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 14:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField14(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 15:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField15(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 16:
if fieldTypeId == thrift.LIST {
if err = p.ReadField16(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- default:
- if err = iprot.Skip(fieldTypeId); err != nil {
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
- }
-
+ case 17:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField17(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 18:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField18(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -10709,166 +12371,219 @@ RequiredFieldNotSetError:
}
func (p *TPushReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = v
+ _field = v
}
+ p.TabletId = _field
return nil
}
-
func (p *TPushReq) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SchemaHash = v
+ _field = v
}
+ p.SchemaHash = _field
return nil
}
-
func (p *TPushReq) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field types.TVersion
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.Version = v
+ _field = v
}
+ p.Version = _field
return nil
}
-
func (p *TPushReq) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field types.TVersionHash
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.VersionHash = v
+ _field = v
}
+ p.VersionHash = _field
return nil
}
-
func (p *TPushReq) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.Timeout = v
+ _field = v
}
+ p.Timeout = _field
return nil
}
-
func (p *TPushReq) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field types.TPushType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.PushType = types.TPushType(v)
+ _field = types.TPushType(v)
}
+ p.PushType = _field
return nil
}
-
func (p *TPushReq) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.HttpFilePath = &v
+ _field = &v
}
+ p.HttpFilePath = _field
return nil
}
-
func (p *TPushReq) ReadField8(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.HttpFileSize = &v
+ _field = &v
}
+ p.HttpFileSize = _field
return nil
}
-
func (p *TPushReq) ReadField9(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.DeleteConditions = make([]*palointernalservice.TCondition, 0, size)
+ _field := make([]*palointernalservice.TCondition, 0, size)
+ values := make([]palointernalservice.TCondition, size)
for i := 0; i < size; i++ {
- _elem := palointernalservice.NewTCondition()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.DeleteConditions = append(p.DeleteConditions, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.DeleteConditions = _field
return nil
}
-
func (p *TPushReq) ReadField10(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.NeedDecompress = &v
+ _field = &v
}
+ p.NeedDecompress = _field
return nil
}
-
func (p *TPushReq) ReadField11(iprot thrift.TProtocol) error {
+
+ var _field *types.TTransactionId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TransactionId = &v
+ _field = &v
}
+ p.TransactionId = _field
return nil
}
-
func (p *TPushReq) ReadField12(iprot thrift.TProtocol) error {
+
+ var _field *types.TPartitionId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.PartitionId = &v
+ _field = &v
}
+ p.PartitionId = _field
return nil
}
-
func (p *TPushReq) ReadField13(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.IsSchemaChanging = &v
+ _field = &v
}
+ p.IsSchemaChanging = _field
return nil
}
-
func (p *TPushReq) ReadField14(iprot thrift.TProtocol) error {
- p.BrokerScanRange = plannodes.NewTBrokerScanRange()
- if err := p.BrokerScanRange.Read(iprot); err != nil {
+ _field := plannodes.NewTBrokerScanRange()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.BrokerScanRange = _field
return nil
}
-
func (p *TPushReq) ReadField15(iprot thrift.TProtocol) error {
- p.DescTbl = descriptors.NewTDescriptorTable()
- if err := p.DescTbl.Read(iprot); err != nil {
+ _field := descriptors.NewTDescriptorTable()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.DescTbl = _field
return nil
}
-
func (p *TPushReq) ReadField16(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.ColumnsDesc = make([]*descriptors.TColumn, 0, size)
+ _field := make([]*descriptors.TColumn, 0, size)
+ values := make([]descriptors.TColumn, size)
for i := 0; i < size; i++ {
- _elem := descriptors.NewTColumn()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.ColumnsDesc = append(p.ColumnsDesc, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.ColumnsDesc = _field
+ return nil
+}
+func (p *TPushReq) ReadField17(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.StorageVaultId = _field
+ return nil
+}
+func (p *TPushReq) ReadField18(iprot thrift.TProtocol) error {
+
+ var _field *int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.SchemaVersion = _field
return nil
}
@@ -10942,7 +12657,14 @@ func (p *TPushReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 16
goto WriteFieldError
}
-
+ if err = p.writeField17(oprot); err != nil {
+ fieldId = 17
+ goto WriteFieldError
+ }
+ if err = p.writeField18(oprot); err != nil {
+ fieldId = 18
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -11269,11 +12991,50 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err)
}
+func (p *TPushReq) writeField17(oprot thrift.TProtocol) (err error) {
+ if p.IsSetStorageVaultId() {
+ if err = oprot.WriteFieldBegin("storage_vault_id", thrift.STRING, 17); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.StorageVaultId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err)
+}
+
+func (p *TPushReq) writeField18(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSchemaVersion() {
+ if err = oprot.WriteFieldBegin("schema_version", thrift.I32, 18); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.SchemaVersion); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err)
+}
+
func (p *TPushReq) String() string {
if p == nil {
return ""
}
return fmt.Sprintf("TPushReq(%+v)", *p)
+
}
func (p *TPushReq) DeepEqual(ano *TPushReq) bool {
@@ -11330,6 +13091,12 @@ func (p *TPushReq) DeepEqual(ano *TPushReq) bool {
if !p.Field16DeepEqual(ano.ColumnsDesc) {
return false
}
+ if !p.Field17DeepEqual(ano.StorageVaultId) {
+ return false
+ }
+ if !p.Field18DeepEqual(ano.SchemaVersion) {
+ return false
+ }
return true
}
@@ -11487,13 +13254,37 @@ func (p *TPushReq) Field16DeepEqual(src []*descriptors.TColumn) bool {
}
return true
}
+func (p *TPushReq) Field17DeepEqual(src *string) bool {
+
+ if p.StorageVaultId == src {
+ return true
+ } else if p.StorageVaultId == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.StorageVaultId, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *TPushReq) Field18DeepEqual(src *int32) bool {
+
+ if p.SchemaVersion == src {
+ return true
+ } else if p.SchemaVersion == nil || src == nil {
+ return false
+ }
+ if *p.SchemaVersion != *src {
+ return false
+ }
+ return true
+}
type TCloneReq struct {
TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"`
SchemaHash types.TSchemaHash `thrift:"schema_hash,2,required" frugal:"2,required,i32" json:"schema_hash"`
SrcBackends []*types.TBackend `thrift:"src_backends,3,required" frugal:"3,required,list" json:"src_backends"`
StorageMedium *types.TStorageMedium `thrift:"storage_medium,4,optional" frugal:"4,optional,TStorageMedium" json:"storage_medium,omitempty"`
- CommittedVersion *types.TVersion `thrift:"committed_version,5,optional" frugal:"5,optional,i64" json:"committed_version,omitempty"`
+ Version *types.TVersion `thrift:"version,5,optional" frugal:"5,optional,i64" json:"version,omitempty"`
CommittedVersionHash *types.TVersionHash `thrift:"committed_version_hash,6,optional" frugal:"6,optional,i64" json:"committed_version_hash,omitempty"`
TaskVersion *int32 `thrift:"task_version,7,optional" frugal:"7,optional,i32" json:"task_version,omitempty"`
SrcPathHash *int64 `thrift:"src_path_hash,8,optional" frugal:"8,optional,i64" json:"src_path_hash,omitempty"`
@@ -11501,20 +13292,20 @@ type TCloneReq struct {
TimeoutS *int32 `thrift:"timeout_s,10,optional" frugal:"10,optional,i32" json:"timeout_s,omitempty"`
ReplicaId types.TReplicaId `thrift:"replica_id,11,optional" frugal:"11,optional,i64" json:"replica_id,omitempty"`
PartitionId *int64 `thrift:"partition_id,12,optional" frugal:"12,optional,i64" json:"partition_id,omitempty"`
+ TableId int64 `thrift:"table_id,13,optional" frugal:"13,optional,i64" json:"table_id,omitempty"`
}
func NewTCloneReq() *TCloneReq {
return &TCloneReq{
ReplicaId: 0,
+ TableId: -1,
}
}
func (p *TCloneReq) InitDefault() {
- *p = TCloneReq{
-
- ReplicaId: 0,
- }
+ p.ReplicaId = 0
+ p.TableId = -1
}
func (p *TCloneReq) GetTabletId() (v types.TTabletId) {
@@ -11538,13 +13329,13 @@ func (p *TCloneReq) GetStorageMedium() (v types.TStorageMedium) {
return *p.StorageMedium
}
-var TCloneReq_CommittedVersion_DEFAULT types.TVersion
+var TCloneReq_Version_DEFAULT types.TVersion
-func (p *TCloneReq) GetCommittedVersion() (v types.TVersion) {
- if !p.IsSetCommittedVersion() {
- return TCloneReq_CommittedVersion_DEFAULT
+func (p *TCloneReq) GetVersion() (v types.TVersion) {
+ if !p.IsSetVersion() {
+ return TCloneReq_Version_DEFAULT
}
- return *p.CommittedVersion
+ return *p.Version
}
var TCloneReq_CommittedVersionHash_DEFAULT types.TVersionHash
@@ -11609,6 +13400,15 @@ func (p *TCloneReq) GetPartitionId() (v int64) {
}
return *p.PartitionId
}
+
+var TCloneReq_TableId_DEFAULT int64 = -1
+
+func (p *TCloneReq) GetTableId() (v int64) {
+ if !p.IsSetTableId() {
+ return TCloneReq_TableId_DEFAULT
+ }
+ return p.TableId
+}
func (p *TCloneReq) SetTabletId(val types.TTabletId) {
p.TabletId = val
}
@@ -11621,8 +13421,8 @@ func (p *TCloneReq) SetSrcBackends(val []*types.TBackend) {
func (p *TCloneReq) SetStorageMedium(val *types.TStorageMedium) {
p.StorageMedium = val
}
-func (p *TCloneReq) SetCommittedVersion(val *types.TVersion) {
- p.CommittedVersion = val
+func (p *TCloneReq) SetVersion(val *types.TVersion) {
+ p.Version = val
}
func (p *TCloneReq) SetCommittedVersionHash(val *types.TVersionHash) {
p.CommittedVersionHash = val
@@ -11645,13 +13445,16 @@ func (p *TCloneReq) SetReplicaId(val types.TReplicaId) {
func (p *TCloneReq) SetPartitionId(val *int64) {
p.PartitionId = val
}
+func (p *TCloneReq) SetTableId(val int64) {
+ p.TableId = val
+}
var fieldIDToName_TCloneReq = map[int16]string{
1: "tablet_id",
2: "schema_hash",
3: "src_backends",
4: "storage_medium",
- 5: "committed_version",
+ 5: "version",
6: "committed_version_hash",
7: "task_version",
8: "src_path_hash",
@@ -11659,14 +13462,15 @@ var fieldIDToName_TCloneReq = map[int16]string{
10: "timeout_s",
11: "replica_id",
12: "partition_id",
+ 13: "table_id",
}
func (p *TCloneReq) IsSetStorageMedium() bool {
return p.StorageMedium != nil
}
-func (p *TCloneReq) IsSetCommittedVersion() bool {
- return p.CommittedVersion != nil
+func (p *TCloneReq) IsSetVersion() bool {
+ return p.Version != nil
}
func (p *TCloneReq) IsSetCommittedVersionHash() bool {
@@ -11697,6 +13501,10 @@ func (p *TCloneReq) IsSetPartitionId() bool {
return p.PartitionId != nil
}
+func (p *TCloneReq) IsSetTableId() bool {
+ return p.TableId != TCloneReq_TableId_DEFAULT
+}
+
func (p *TCloneReq) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -11725,10 +13533,8 @@ func (p *TCloneReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -11736,10 +13542,8 @@ func (p *TCloneReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSchemaHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.LIST {
@@ -11747,107 +13551,94 @@ func (p *TCloneReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSrcBackends = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I32 {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I64 {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.I64 {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.I32 {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.I64 {
if err = p.ReadField8(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.I64 {
if err = p.ReadField9(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.I32 {
if err = p.ReadField10(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 11:
if fieldTypeId == thrift.I64 {
if err = p.ReadField11(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 12:
if fieldTypeId == thrift.I64 {
if err = p.ReadField12(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 13:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField13(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -11889,122 +13680,159 @@ RequiredFieldNotSetError:
}
func (p *TCloneReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = v
+ _field = v
}
+ p.TabletId = _field
return nil
}
-
func (p *TCloneReq) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SchemaHash = v
+ _field = v
}
+ p.SchemaHash = _field
return nil
}
-
func (p *TCloneReq) ReadField3(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.SrcBackends = make([]*types.TBackend, 0, size)
+ _field := make([]*types.TBackend, 0, size)
+ values := make([]types.TBackend, size)
for i := 0; i < size; i++ {
- _elem := types.NewTBackend()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.SrcBackends = append(p.SrcBackends, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.SrcBackends = _field
return nil
}
-
func (p *TCloneReq) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *types.TStorageMedium
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := types.TStorageMedium(v)
- p.StorageMedium = &tmp
+ _field = &tmp
}
+ p.StorageMedium = _field
return nil
}
-
func (p *TCloneReq) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersion
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.CommittedVersion = &v
+ _field = &v
}
+ p.Version = _field
return nil
}
-
func (p *TCloneReq) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersionHash
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.CommittedVersionHash = &v
+ _field = &v
}
+ p.CommittedVersionHash = _field
return nil
}
-
func (p *TCloneReq) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field *int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.TaskVersion = &v
+ _field = &v
}
+ p.TaskVersion = _field
return nil
}
-
func (p *TCloneReq) ReadField8(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.SrcPathHash = &v
+ _field = &v
}
+ p.SrcPathHash = _field
return nil
}
-
func (p *TCloneReq) ReadField9(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.DestPathHash = &v
+ _field = &v
}
+ p.DestPathHash = _field
return nil
}
-
func (p *TCloneReq) ReadField10(iprot thrift.TProtocol) error {
+
+ var _field *int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.TimeoutS = &v
+ _field = &v
}
+ p.TimeoutS = _field
return nil
}
-
func (p *TCloneReq) ReadField11(iprot thrift.TProtocol) error {
+
+ var _field types.TReplicaId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.ReplicaId = v
+ _field = v
}
+ p.ReplicaId = _field
return nil
}
-
func (p *TCloneReq) ReadField12(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.PartitionId = &v
+ _field = &v
}
+ p.PartitionId = _field
+ return nil
+}
+func (p *TCloneReq) ReadField13(iprot thrift.TProtocol) error {
+
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.TableId = _field
return nil
}
@@ -12062,7 +13890,10 @@ func (p *TCloneReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 12
goto WriteFieldError
}
-
+ if err = p.writeField13(oprot); err != nil {
+ fieldId = 13
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -12160,11 +13991,11 @@ WriteFieldEndError:
}
func (p *TCloneReq) writeField5(oprot thrift.TProtocol) (err error) {
- if p.IsSetCommittedVersion() {
- if err = oprot.WriteFieldBegin("committed_version", thrift.I64, 5); err != nil {
+ if p.IsSetVersion() {
+ if err = oprot.WriteFieldBegin("version", thrift.I64, 5); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteI64(*p.CommittedVersion); err != nil {
+ if err := oprot.WriteI64(*p.Version); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -12311,11 +14142,31 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err)
}
+func (p *TCloneReq) writeField13(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTableId() {
+ if err = oprot.WriteFieldBegin("table_id", thrift.I64, 13); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.TableId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err)
+}
+
func (p *TCloneReq) String() string {
if p == nil {
return ""
}
return fmt.Sprintf("TCloneReq(%+v)", *p)
+
}
func (p *TCloneReq) DeepEqual(ano *TCloneReq) bool {
@@ -12336,7 +14187,7 @@ func (p *TCloneReq) DeepEqual(ano *TCloneReq) bool {
if !p.Field4DeepEqual(ano.StorageMedium) {
return false
}
- if !p.Field5DeepEqual(ano.CommittedVersion) {
+ if !p.Field5DeepEqual(ano.Version) {
return false
}
if !p.Field6DeepEqual(ano.CommittedVersionHash) {
@@ -12360,6 +14211,9 @@ func (p *TCloneReq) DeepEqual(ano *TCloneReq) bool {
if !p.Field12DeepEqual(ano.PartitionId) {
return false
}
+ if !p.Field13DeepEqual(ano.TableId) {
+ return false
+ }
return true
}
@@ -12404,12 +14258,12 @@ func (p *TCloneReq) Field4DeepEqual(src *types.TStorageMedium) bool {
}
func (p *TCloneReq) Field5DeepEqual(src *types.TVersion) bool {
- if p.CommittedVersion == src {
+ if p.Version == src {
return true
- } else if p.CommittedVersion == nil || src == nil {
+ } else if p.Version == nil || src == nil {
return false
}
- if *p.CommittedVersion != *src {
+ if *p.Version != *src {
return false
}
return true
@@ -12493,6 +14347,13 @@ func (p *TCloneReq) Field12DeepEqual(src *int64) bool {
}
return true
}
+func (p *TCloneReq) Field13DeepEqual(src int64) bool {
+
+ if p.TableId != src {
+ return false
+ }
+ return true
+}
type TCompactionReq struct {
TabletId *types.TTabletId `thrift:"tablet_id,1,optional" frugal:"1,optional,i64" json:"tablet_id,omitempty"`
@@ -12505,7 +14366,6 @@ func NewTCompactionReq() *TCompactionReq {
}
func (p *TCompactionReq) InitDefault() {
- *p = TCompactionReq{}
}
var TCompactionReq_TabletId_DEFAULT types.TTabletId
@@ -12586,37 +14446,30 @@ func (p *TCompactionReq) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRING {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -12642,29 +14495,36 @@ ReadStructEndError:
}
func (p *TCompactionReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = &v
+ _field = &v
}
+ p.TabletId = _field
return nil
}
-
func (p *TCompactionReq) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SchemaHash = &v
+ _field = &v
}
+ p.SchemaHash = _field
return nil
}
-
func (p *TCompactionReq) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Type = &v
+ _field = &v
}
+ p.Type = _field
return nil
}
@@ -12686,7 +14546,6 @@ func (p *TCompactionReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 3
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -12767,6 +14626,7 @@ func (p *TCompactionReq) String() string {
return ""
}
return fmt.Sprintf("TCompactionReq(%+v)", *p)
+
}
func (p *TCompactionReq) DeepEqual(ano *TCompactionReq) bool {
@@ -12836,7 +14696,6 @@ func NewTStorageMediumMigrateReq() *TStorageMediumMigrateReq {
}
func (p *TStorageMediumMigrateReq) InitDefault() {
- *p = TStorageMediumMigrateReq{}
}
func (p *TStorageMediumMigrateReq) GetTabletId() (v types.TTabletId) {
@@ -12911,10 +14770,8 @@ func (p *TStorageMediumMigrateReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -12922,10 +14779,8 @@ func (p *TStorageMediumMigrateReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSchemaHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I32 {
@@ -12933,27 +14788,22 @@ func (p *TStorageMediumMigrateReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetStorageMedium = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.STRING {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -12995,38 +14845,47 @@ RequiredFieldNotSetError:
}
func (p *TStorageMediumMigrateReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = v
+ _field = v
}
+ p.TabletId = _field
return nil
}
-
func (p *TStorageMediumMigrateReq) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SchemaHash = v
+ _field = v
}
+ p.SchemaHash = _field
return nil
}
-
func (p *TStorageMediumMigrateReq) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field types.TStorageMedium
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.StorageMedium = types.TStorageMedium(v)
+ _field = types.TStorageMedium(v)
}
+ p.StorageMedium = _field
return nil
}
-
func (p *TStorageMediumMigrateReq) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.DataDir = &v
+ _field = &v
}
+ p.DataDir = _field
return nil
}
@@ -13052,7 +14911,6 @@ func (p *TStorageMediumMigrateReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -13146,6 +15004,7 @@ func (p *TStorageMediumMigrateReq) String() string {
return ""
}
return fmt.Sprintf("TStorageMediumMigrateReq(%+v)", *p)
+
}
func (p *TStorageMediumMigrateReq) DeepEqual(ano *TStorageMediumMigrateReq) bool {
@@ -13215,7 +15074,6 @@ func NewTCancelDeleteDataReq() *TCancelDeleteDataReq {
}
func (p *TCancelDeleteDataReq) InitDefault() {
- *p = TCancelDeleteDataReq{}
}
func (p *TCancelDeleteDataReq) GetTabletId() (v types.TTabletId) {
@@ -13282,10 +15140,8 @@ func (p *TCancelDeleteDataReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -13293,10 +15149,8 @@ func (p *TCancelDeleteDataReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSchemaHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
@@ -13304,10 +15158,8 @@ func (p *TCancelDeleteDataReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetVersion = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I64 {
@@ -13315,17 +15167,14 @@ func (p *TCancelDeleteDataReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetVersionHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -13372,38 +15221,47 @@ RequiredFieldNotSetError:
}
func (p *TCancelDeleteDataReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = v
+ _field = v
}
+ p.TabletId = _field
return nil
}
-
func (p *TCancelDeleteDataReq) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SchemaHash = v
+ _field = v
}
+ p.SchemaHash = _field
return nil
}
-
func (p *TCancelDeleteDataReq) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field types.TVersion
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.Version = v
+ _field = v
}
+ p.Version = _field
return nil
}
-
func (p *TCancelDeleteDataReq) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field types.TVersionHash
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.VersionHash = v
+ _field = v
}
+ p.VersionHash = _field
return nil
}
@@ -13429,7 +15287,6 @@ func (p *TCancelDeleteDataReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -13521,6 +15378,7 @@ func (p *TCancelDeleteDataReq) String() string {
return ""
}
return fmt.Sprintf("TCancelDeleteDataReq(%+v)", *p)
+
}
func (p *TCancelDeleteDataReq) DeepEqual(ano *TCancelDeleteDataReq) bool {
@@ -13585,7 +15443,6 @@ func NewTCheckConsistencyReq() *TCheckConsistencyReq {
}
func (p *TCheckConsistencyReq) InitDefault() {
- *p = TCheckConsistencyReq{}
}
func (p *TCheckConsistencyReq) GetTabletId() (v types.TTabletId) {
@@ -13652,10 +15509,8 @@ func (p *TCheckConsistencyReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -13663,10 +15518,8 @@ func (p *TCheckConsistencyReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSchemaHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
@@ -13674,10 +15527,8 @@ func (p *TCheckConsistencyReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetVersion = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I64 {
@@ -13685,17 +15536,14 @@ func (p *TCheckConsistencyReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetVersionHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -13742,38 +15590,47 @@ RequiredFieldNotSetError:
}
func (p *TCheckConsistencyReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = v
+ _field = v
}
+ p.TabletId = _field
return nil
}
-
func (p *TCheckConsistencyReq) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SchemaHash = v
+ _field = v
}
+ p.SchemaHash = _field
return nil
}
-
func (p *TCheckConsistencyReq) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field types.TVersion
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.Version = v
+ _field = v
}
+ p.Version = _field
return nil
}
-
func (p *TCheckConsistencyReq) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field types.TVersionHash
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.VersionHash = v
+ _field = v
}
+ p.VersionHash = _field
return nil
}
@@ -13799,7 +15656,6 @@ func (p *TCheckConsistencyReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -13891,6 +15747,7 @@ func (p *TCheckConsistencyReq) String() string {
return ""
}
return fmt.Sprintf("TCheckConsistencyReq(%+v)", *p)
+
}
func (p *TCheckConsistencyReq) DeepEqual(ano *TCheckConsistencyReq) bool {
@@ -13960,10 +15817,7 @@ func NewTUploadReq() *TUploadReq {
}
func (p *TUploadReq) InitDefault() {
- *p = TUploadReq{
-
- StorageBackend: types.TStorageBackendType_BROKER,
- }
+ p.StorageBackend = types.TStorageBackendType_BROKER
}
func (p *TUploadReq) GetJobId() (v int64) {
@@ -14081,10 +15935,8 @@ func (p *TUploadReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetJobId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.MAP {
@@ -14092,10 +15944,8 @@ func (p *TUploadReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSrcDestMap = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRUCT {
@@ -14103,47 +15953,38 @@ func (p *TUploadReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetBrokerAddr = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.MAP {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I32 {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.STRING {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -14185,20 +16026,22 @@ RequiredFieldNotSetError:
}
func (p *TUploadReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.JobId = v
+ _field = v
}
+ p.JobId = _field
return nil
}
-
func (p *TUploadReq) ReadField2(iprot thrift.TProtocol) error {
_, _, size, err := iprot.ReadMapBegin()
if err != nil {
return err
}
- p.SrcDestMap = make(map[string]string, size)
+ _field := make(map[string]string, size)
for i := 0; i < size; i++ {
var _key string
if v, err := iprot.ReadString(); err != nil {
@@ -14214,28 +16057,28 @@ func (p *TUploadReq) ReadField2(iprot thrift.TProtocol) error {
_val = v
}
- p.SrcDestMap[_key] = _val
+ _field[_key] = _val
}
if err := iprot.ReadMapEnd(); err != nil {
return err
}
+ p.SrcDestMap = _field
return nil
}
-
func (p *TUploadReq) ReadField3(iprot thrift.TProtocol) error {
- p.BrokerAddr = types.NewTNetworkAddress()
- if err := p.BrokerAddr.Read(iprot); err != nil {
+ _field := types.NewTNetworkAddress()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.BrokerAddr = _field
return nil
}
-
func (p *TUploadReq) ReadField4(iprot thrift.TProtocol) error {
_, _, size, err := iprot.ReadMapBegin()
if err != nil {
return err
}
- p.BrokerProp = make(map[string]string, size)
+ _field := make(map[string]string, size)
for i := 0; i < size; i++ {
var _key string
if v, err := iprot.ReadString(); err != nil {
@@ -14251,29 +16094,34 @@ func (p *TUploadReq) ReadField4(iprot thrift.TProtocol) error {
_val = v
}
- p.BrokerProp[_key] = _val
+ _field[_key] = _val
}
if err := iprot.ReadMapEnd(); err != nil {
return err
}
+ p.BrokerProp = _field
return nil
}
-
func (p *TUploadReq) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field types.TStorageBackendType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.StorageBackend = types.TStorageBackendType(v)
+ _field = types.TStorageBackendType(v)
}
+ p.StorageBackend = _field
return nil
}
-
func (p *TUploadReq) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Location = &v
+ _field = &v
}
+ p.Location = _field
return nil
}
@@ -14307,7 +16155,6 @@ func (p *TUploadReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 6
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -14351,11 +16198,9 @@ func (p *TUploadReq) writeField2(oprot thrift.TProtocol) (err error) {
return err
}
for k, v := range p.SrcDestMap {
-
if err := oprot.WriteString(k); err != nil {
return err
}
-
if err := oprot.WriteString(v); err != nil {
return err
}
@@ -14399,11 +16244,9 @@ func (p *TUploadReq) writeField4(oprot thrift.TProtocol) (err error) {
return err
}
for k, v := range p.BrokerProp {
-
if err := oprot.WriteString(k); err != nil {
return err
}
-
if err := oprot.WriteString(v); err != nil {
return err
}
@@ -14465,6 +16308,7 @@ func (p *TUploadReq) String() string {
return ""
}
return fmt.Sprintf("TUploadReq(%+v)", *p)
+
}
func (p *TUploadReq) DeepEqual(ano *TUploadReq) bool {
@@ -14569,7 +16413,6 @@ func NewTRemoteTabletSnapshot() *TRemoteTabletSnapshot {
}
func (p *TRemoteTabletSnapshot) InitDefault() {
- *p = TRemoteTabletSnapshot{}
}
var TRemoteTabletSnapshot_LocalTabletId_DEFAULT int64
@@ -14718,77 +16561,62 @@ func (p *TRemoteTabletSnapshot) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRING {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I64 {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.STRING {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.STRING {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -14814,64 +16642,77 @@ ReadStructEndError:
}
func (p *TRemoteTabletSnapshot) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.LocalTabletId = &v
+ _field = &v
}
+ p.LocalTabletId = _field
return nil
}
-
func (p *TRemoteTabletSnapshot) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.LocalSnapshotPath = &v
+ _field = &v
}
+ p.LocalSnapshotPath = _field
return nil
}
-
func (p *TRemoteTabletSnapshot) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.RemoteTabletId = &v
+ _field = &v
}
+ p.RemoteTabletId = _field
return nil
}
-
func (p *TRemoteTabletSnapshot) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.RemoteBeId = &v
+ _field = &v
}
+ p.RemoteBeId = _field
return nil
}
-
func (p *TRemoteTabletSnapshot) ReadField5(iprot thrift.TProtocol) error {
- p.RemoteBeAddr = types.NewTNetworkAddress()
- if err := p.RemoteBeAddr.Read(iprot); err != nil {
+ _field := types.NewTNetworkAddress()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.RemoteBeAddr = _field
return nil
}
-
func (p *TRemoteTabletSnapshot) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.RemoteSnapshotPath = &v
+ _field = &v
}
+ p.RemoteSnapshotPath = _field
return nil
}
-
func (p *TRemoteTabletSnapshot) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.RemoteToken = &v
+ _field = &v
}
+ p.RemoteToken = _field
return nil
}
@@ -14909,7 +16750,6 @@ func (p *TRemoteTabletSnapshot) Write(oprot thrift.TProtocol) (err error) {
fieldId = 7
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -15066,6 +16906,7 @@ func (p *TRemoteTabletSnapshot) String() string {
return ""
}
return fmt.Sprintf("TRemoteTabletSnapshot(%+v)", *p)
+
}
func (p *TRemoteTabletSnapshot) DeepEqual(ano *TRemoteTabletSnapshot) bool {
@@ -15196,10 +17037,7 @@ func NewTDownloadReq() *TDownloadReq {
}
func (p *TDownloadReq) InitDefault() {
- *p = TDownloadReq{
-
- StorageBackend: types.TStorageBackendType_BROKER,
- }
+ p.StorageBackend = types.TStorageBackendType_BROKER
}
func (p *TDownloadReq) GetJobId() (v int64) {
@@ -15334,10 +17172,8 @@ func (p *TDownloadReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetJobId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.MAP {
@@ -15345,10 +17181,8 @@ func (p *TDownloadReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSrcDestMap = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRUCT {
@@ -15356,57 +17190,46 @@ func (p *TDownloadReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetBrokerAddr = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.MAP {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I32 {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.STRING {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.LIST {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -15448,20 +17271,22 @@ RequiredFieldNotSetError:
}
func (p *TDownloadReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.JobId = v
+ _field = v
}
+ p.JobId = _field
return nil
}
-
func (p *TDownloadReq) ReadField2(iprot thrift.TProtocol) error {
_, _, size, err := iprot.ReadMapBegin()
if err != nil {
return err
}
- p.SrcDestMap = make(map[string]string, size)
+ _field := make(map[string]string, size)
for i := 0; i < size; i++ {
var _key string
if v, err := iprot.ReadString(); err != nil {
@@ -15477,28 +17302,28 @@ func (p *TDownloadReq) ReadField2(iprot thrift.TProtocol) error {
_val = v
}
- p.SrcDestMap[_key] = _val
+ _field[_key] = _val
}
if err := iprot.ReadMapEnd(); err != nil {
return err
}
+ p.SrcDestMap = _field
return nil
}
-
func (p *TDownloadReq) ReadField3(iprot thrift.TProtocol) error {
- p.BrokerAddr = types.NewTNetworkAddress()
- if err := p.BrokerAddr.Read(iprot); err != nil {
+ _field := types.NewTNetworkAddress()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.BrokerAddr = _field
return nil
}
-
func (p *TDownloadReq) ReadField4(iprot thrift.TProtocol) error {
_, _, size, err := iprot.ReadMapBegin()
if err != nil {
return err
}
- p.BrokerProp = make(map[string]string, size)
+ _field := make(map[string]string, size)
for i := 0; i < size; i++ {
var _key string
if v, err := iprot.ReadString(); err != nil {
@@ -15514,49 +17339,57 @@ func (p *TDownloadReq) ReadField4(iprot thrift.TProtocol) error {
_val = v
}
- p.BrokerProp[_key] = _val
+ _field[_key] = _val
}
if err := iprot.ReadMapEnd(); err != nil {
return err
}
+ p.BrokerProp = _field
return nil
}
-
func (p *TDownloadReq) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field types.TStorageBackendType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.StorageBackend = types.TStorageBackendType(v)
+ _field = types.TStorageBackendType(v)
}
+ p.StorageBackend = _field
return nil
}
-
func (p *TDownloadReq) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Location = &v
+ _field = &v
}
+ p.Location = _field
return nil
}
-
func (p *TDownloadReq) ReadField7(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.RemoteTabletSnapshots = make([]*TRemoteTabletSnapshot, 0, size)
+ _field := make([]*TRemoteTabletSnapshot, 0, size)
+ values := make([]TRemoteTabletSnapshot, size)
for i := 0; i < size; i++ {
- _elem := NewTRemoteTabletSnapshot()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.RemoteTabletSnapshots = append(p.RemoteTabletSnapshots, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.RemoteTabletSnapshots = _field
return nil
}
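`ReadField7` now backs all decoded `TRemoteTabletSnapshot` elements with one slice allocation (`values := make([]TRemoteTabletSnapshot, size)`) and appends pointers into it, instead of calling `NewTRemoteTabletSnapshot()` once per element; `InitDefault` is still invoked for each element. A generic sketch of the same allocation pattern (hypothetical `elem` type and `readElem` reader):

```go
// Sketch of the new list-decoding allocation: elements share a single backing
// slice and are taken by address, rather than one heap allocation per element.
package main

import "fmt"

type elem struct{ id int64 }

func (e *elem) InitDefault() {}

func readElem(e *elem, i int) error {
	e.id = int64(i)
	return nil
}

func decodeList(size int) ([]*elem, error) {
	field := make([]*elem, 0, size)
	values := make([]elem, size) // one allocation for all elements
	for i := 0; i < size; i++ {
		e := &values[i]
		e.InitDefault()
		if err := readElem(e, i); err != nil {
			return nil, err
		}
		field = append(field, e)
	}
	return field, nil
}

func main() {
	snaps, _ := decodeList(3)
	fmt.Println(len(snaps), snaps[2].id) // 3 2
}
```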
@@ -15594,7 +17427,6 @@ func (p *TDownloadReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 7
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -15638,11 +17470,9 @@ func (p *TDownloadReq) writeField2(oprot thrift.TProtocol) (err error) {
return err
}
for k, v := range p.SrcDestMap {
-
if err := oprot.WriteString(k); err != nil {
return err
}
-
if err := oprot.WriteString(v); err != nil {
return err
}
@@ -15686,11 +17516,9 @@ func (p *TDownloadReq) writeField4(oprot thrift.TProtocol) (err error) {
return err
}
for k, v := range p.BrokerProp {
-
if err := oprot.WriteString(k); err != nil {
return err
}
-
if err := oprot.WriteString(v); err != nil {
return err
}
@@ -15779,6 +17607,7 @@ func (p *TDownloadReq) String() string {
return ""
}
return fmt.Sprintf("TDownloadReq(%+v)", *p)
+
}
func (p *TDownloadReq) DeepEqual(ano *TDownloadReq) bool {
@@ -15898,6 +17727,7 @@ type TSnapshotRequest struct {
StartVersion *types.TVersion `thrift:"start_version,11,optional" frugal:"11,optional,i64" json:"start_version,omitempty"`
EndVersion *types.TVersion `thrift:"end_version,12,optional" frugal:"12,optional,i64" json:"end_version,omitempty"`
IsCopyBinlog *bool `thrift:"is_copy_binlog,13,optional" frugal:"13,optional,bool" json:"is_copy_binlog,omitempty"`
+ RefTabletId *types.TTabletId `thrift:"ref_tablet_id,14,optional" frugal:"14,optional,i64" json:"ref_tablet_id,omitempty"`
}
func NewTSnapshotRequest() *TSnapshotRequest {
@@ -15908,10 +17738,7 @@ func NewTSnapshotRequest() *TSnapshotRequest {
}
func (p *TSnapshotRequest) InitDefault() {
- *p = TSnapshotRequest{
-
- PreferredSnapshotVersion: int32(types.TPREFER_SNAPSHOT_REQ_VERSION),
- }
+ p.PreferredSnapshotVersion = int32(types.TPREFER_SNAPSHOT_REQ_VERSION)
}
func (p *TSnapshotRequest) GetTabletId() (v types.TTabletId) {
@@ -16020,6 +17847,15 @@ func (p *TSnapshotRequest) GetIsCopyBinlog() (v bool) {
}
return *p.IsCopyBinlog
}
+
+var TSnapshotRequest_RefTabletId_DEFAULT types.TTabletId
+
+func (p *TSnapshotRequest) GetRefTabletId() (v types.TTabletId) {
+ if !p.IsSetRefTabletId() {
+ return TSnapshotRequest_RefTabletId_DEFAULT
+ }
+ return *p.RefTabletId
+}
func (p *TSnapshotRequest) SetTabletId(val types.TTabletId) {
p.TabletId = val
}
@@ -16059,6 +17895,9 @@ func (p *TSnapshotRequest) SetEndVersion(val *types.TVersion) {
func (p *TSnapshotRequest) SetIsCopyBinlog(val *bool) {
p.IsCopyBinlog = val
}
+func (p *TSnapshotRequest) SetRefTabletId(val *types.TTabletId) {
+ p.RefTabletId = val
+}
var fieldIDToName_TSnapshotRequest = map[int16]string{
1: "tablet_id",
@@ -16074,6 +17913,7 @@ var fieldIDToName_TSnapshotRequest = map[int16]string{
11: "start_version",
12: "end_version",
13: "is_copy_binlog",
+ 14: "ref_tablet_id",
}
func (p *TSnapshotRequest) IsSetVersion() bool {
@@ -16120,6 +17960,10 @@ func (p *TSnapshotRequest) IsSetIsCopyBinlog() bool {
return p.IsCopyBinlog != nil
}
+func (p *TSnapshotRequest) IsSetRefTabletId() bool {
+ return p.RefTabletId != nil
+}
+
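`TSnapshotRequest` gains an optional field 14, `ref_tablet_id` (i64), exposed as a pointer with the usual generated helpers (`SetRefTabletId`, `IsSetRefTabletId`, `GetRefTabletId`, plus the read/write/DeepEqual hooks below). A hypothetical usage sketch; the import paths are placeholders for wherever this repo keeps the generated packages:

```go
// Hypothetical usage of the new optional ref_tablet_id field. The import
// paths below are placeholders, not the repo's real package paths.
package main

import (
	"fmt"

	backendservice "example.com/generated/backendservice" // placeholder path
	types "example.com/generated/types"                    // placeholder path
)

func main() {
	req := backendservice.NewTSnapshotRequest()
	req.SetTabletId(types.TTabletId(1001))

	refId := types.TTabletId(2002)
	req.SetRefTabletId(&refId) // optional field: pass a pointer, nil means unset

	if req.IsSetRefTabletId() {
		fmt.Println("ref tablet id:", req.GetRefTabletId())
	}
}
```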
func (p *TSnapshotRequest) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -16147,10 +17991,8 @@ func (p *TSnapshotRequest) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -16158,295 +18000,977 @@ func (p *TSnapshotRequest) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSchemaHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I64 {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I64 {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.LIST {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField8(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.I32 {
if err = p.ReadField9(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField10(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 11:
if fieldTypeId == thrift.I64 {
if err = p.ReadField11(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 12:
if fieldTypeId == thrift.I64 {
if err = p.ReadField12(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 13:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField13(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 14:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField14(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
}
- if err = iprot.ReadStructEnd(); err != nil {
- goto ReadStructEndError
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetTabletId {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetSchemaHash {
+ fieldId = 2
+ goto RequiredFieldNotSetError
+ }
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSnapshotRequest[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSnapshotRequest[fieldId]))
+}
+
+func (p *TSnapshotRequest) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.TabletId = _field
+ return nil
+}
+func (p *TSnapshotRequest) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field types.TSchemaHash
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.SchemaHash = _field
+ return nil
+}
+func (p *TSnapshotRequest) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersion
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Version = _field
+ return nil
+}
+func (p *TSnapshotRequest) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersionHash
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.VersionHash = _field
+ return nil
+}
+func (p *TSnapshotRequest) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Timeout = _field
+ return nil
+}
+func (p *TSnapshotRequest) ReadField6(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]types.TVersion, 0, size)
+ for i := 0; i < size; i++ {
+
+ var _elem types.TVersion
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.MissingVersion = _field
+ return nil
+}
+func (p *TSnapshotRequest) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field *bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.ListFiles = _field
+ return nil
+}
+func (p *TSnapshotRequest) ReadField8(iprot thrift.TProtocol) error {
+
+ var _field *bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.AllowIncrementalClone = _field
+ return nil
+}
+func (p *TSnapshotRequest) ReadField9(iprot thrift.TProtocol) error {
+
+ var _field int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.PreferredSnapshotVersion = _field
+ return nil
+}
+func (p *TSnapshotRequest) ReadField10(iprot thrift.TProtocol) error {
+
+ var _field *bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.IsCopyTabletTask = _field
+ return nil
+}
+func (p *TSnapshotRequest) ReadField11(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersion
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.StartVersion = _field
+ return nil
+}
+func (p *TSnapshotRequest) ReadField12(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersion
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.EndVersion = _field
+ return nil
+}
+func (p *TSnapshotRequest) ReadField13(iprot thrift.TProtocol) error {
+
+ var _field *bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.IsCopyBinlog = _field
+ return nil
+}
+func (p *TSnapshotRequest) ReadField14(iprot thrift.TProtocol) error {
+
+ var _field *types.TTabletId
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.RefTabletId = _field
+ return nil
+}
+
+func (p *TSnapshotRequest) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TSnapshotRequest"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ if err = p.writeField5(oprot); err != nil {
+ fieldId = 5
+ goto WriteFieldError
+ }
+ if err = p.writeField6(oprot); err != nil {
+ fieldId = 6
+ goto WriteFieldError
+ }
+ if err = p.writeField7(oprot); err != nil {
+ fieldId = 7
+ goto WriteFieldError
+ }
+ if err = p.writeField8(oprot); err != nil {
+ fieldId = 8
+ goto WriteFieldError
+ }
+ if err = p.writeField9(oprot); err != nil {
+ fieldId = 9
+ goto WriteFieldError
+ }
+ if err = p.writeField10(oprot); err != nil {
+ fieldId = 10
+ goto WriteFieldError
+ }
+ if err = p.writeField11(oprot); err != nil {
+ fieldId = 11
+ goto WriteFieldError
+ }
+ if err = p.writeField12(oprot); err != nil {
+ fieldId = 12
+ goto WriteFieldError
+ }
+ if err = p.writeField13(oprot); err != nil {
+ fieldId = 13
+ goto WriteFieldError
+ }
+ if err = p.writeField14(oprot); err != nil {
+ fieldId = 14
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("tablet_id", thrift.I64, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.TabletId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField2(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("schema_hash", thrift.I32, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(p.SchemaHash); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVersion() {
+ if err = oprot.WriteFieldBegin("version", thrift.I64, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.Version); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVersionHash() {
+ if err = oprot.WriteFieldBegin("version_hash", thrift.I64, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.VersionHash); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTimeout() {
+ if err = oprot.WriteFieldBegin("timeout", thrift.I64, 5); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.Timeout); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField6(oprot thrift.TProtocol) (err error) {
+ if p.IsSetMissingVersion() {
+ if err = oprot.WriteFieldBegin("missing_version", thrift.LIST, 6); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.I64, len(p.MissingVersion)); err != nil {
+ return err
+ }
+ for _, v := range p.MissingVersion {
+ if err := oprot.WriteI64(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField7(oprot thrift.TProtocol) (err error) {
+ if p.IsSetListFiles() {
+ if err = oprot.WriteFieldBegin("list_files", thrift.BOOL, 7); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(*p.ListFiles); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField8(oprot thrift.TProtocol) (err error) {
+ if p.IsSetAllowIncrementalClone() {
+ if err = oprot.WriteFieldBegin("allow_incremental_clone", thrift.BOOL, 8); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(*p.AllowIncrementalClone); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField9(oprot thrift.TProtocol) (err error) {
+ if p.IsSetPreferredSnapshotVersion() {
+ if err = oprot.WriteFieldBegin("preferred_snapshot_version", thrift.I32, 9); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(p.PreferredSnapshotVersion); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField10(oprot thrift.TProtocol) (err error) {
+ if p.IsSetIsCopyTabletTask() {
+ if err = oprot.WriteFieldBegin("is_copy_tablet_task", thrift.BOOL, 10); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(*p.IsCopyTabletTask); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField11(oprot thrift.TProtocol) (err error) {
+ if p.IsSetStartVersion() {
+ if err = oprot.WriteFieldBegin("start_version", thrift.I64, 11); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.StartVersion); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField12(oprot thrift.TProtocol) (err error) {
+ if p.IsSetEndVersion() {
+ if err = oprot.WriteFieldBegin("end_version", thrift.I64, 12); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.EndVersion); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField13(oprot thrift.TProtocol) (err error) {
+ if p.IsSetIsCopyBinlog() {
+ if err = oprot.WriteFieldBegin("is_copy_binlog", thrift.BOOL, 13); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(*p.IsCopyBinlog); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) writeField14(oprot thrift.TProtocol) (err error) {
+ if p.IsSetRefTabletId() {
+ if err = oprot.WriteFieldBegin("ref_tablet_id", thrift.I64, 14); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.RefTabletId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err)
+}
+
+func (p *TSnapshotRequest) String() string {
+ if p == nil {
+ return ""
}
+ return fmt.Sprintf("TSnapshotRequest(%+v)", *p)
- if !issetTabletId {
- fieldId = 1
- goto RequiredFieldNotSetError
+}
+
+func (p *TSnapshotRequest) DeepEqual(ano *TSnapshotRequest) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.TabletId) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.SchemaHash) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.Version) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.VersionHash) {
+ return false
+ }
+ if !p.Field5DeepEqual(ano.Timeout) {
+ return false
+ }
+ if !p.Field6DeepEqual(ano.MissingVersion) {
+ return false
+ }
+ if !p.Field7DeepEqual(ano.ListFiles) {
+ return false
+ }
+ if !p.Field8DeepEqual(ano.AllowIncrementalClone) {
+ return false
+ }
+ if !p.Field9DeepEqual(ano.PreferredSnapshotVersion) {
+ return false
+ }
+ if !p.Field10DeepEqual(ano.IsCopyTabletTask) {
+ return false
+ }
+ if !p.Field11DeepEqual(ano.StartVersion) {
+ return false
+ }
+ if !p.Field12DeepEqual(ano.EndVersion) {
+ return false
+ }
+ if !p.Field13DeepEqual(ano.IsCopyBinlog) {
+ return false
+ }
+ if !p.Field14DeepEqual(ano.RefTabletId) {
+ return false
}
+ return true
+}
- if !issetSchemaHash {
- fieldId = 2
- goto RequiredFieldNotSetError
+func (p *TSnapshotRequest) Field1DeepEqual(src types.TTabletId) bool {
+
+ if p.TabletId != src {
+ return false
}
- return nil
-ReadStructBeginError:
- return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
-ReadFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSnapshotRequest[fieldId]), err)
-SkipFieldError:
- return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ return true
+}
+func (p *TSnapshotRequest) Field2DeepEqual(src types.TSchemaHash) bool {
-ReadFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
-ReadStructEndError:
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-RequiredFieldNotSetError:
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSnapshotRequest[fieldId]))
+ if p.SchemaHash != src {
+ return false
+ }
+ return true
}
+func (p *TSnapshotRequest) Field3DeepEqual(src *types.TVersion) bool {
-func (p *TSnapshotRequest) ReadField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- p.TabletId = v
+ if p.Version == src {
+ return true
+ } else if p.Version == nil || src == nil {
+ return false
}
- return nil
+ if *p.Version != *src {
+ return false
+ }
+ return true
}
+func (p *TSnapshotRequest) Field4DeepEqual(src *types.TVersionHash) bool {
-func (p *TSnapshotRequest) ReadField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(); err != nil {
- return err
- } else {
- p.SchemaHash = v
+ if p.VersionHash == src {
+ return true
+ } else if p.VersionHash == nil || src == nil {
+ return false
}
- return nil
+ if *p.VersionHash != *src {
+ return false
+ }
+ return true
}
+func (p *TSnapshotRequest) Field5DeepEqual(src *int64) bool {
-func (p *TSnapshotRequest) ReadField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- p.Version = &v
+ if p.Timeout == src {
+ return true
+ } else if p.Timeout == nil || src == nil {
+ return false
}
- return nil
+ if *p.Timeout != *src {
+ return false
+ }
+ return true
}
+func (p *TSnapshotRequest) Field6DeepEqual(src []types.TVersion) bool {
-func (p *TSnapshotRequest) ReadField4(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- p.VersionHash = &v
+ if len(p.MissingVersion) != len(src) {
+ return false
}
- return nil
+ for i, v := range p.MissingVersion {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
+ }
+ return true
}
+func (p *TSnapshotRequest) Field7DeepEqual(src *bool) bool {
-func (p *TSnapshotRequest) ReadField5(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- p.Timeout = &v
+ if p.ListFiles == src {
+ return true
+ } else if p.ListFiles == nil || src == nil {
+ return false
}
- return nil
+ if *p.ListFiles != *src {
+ return false
+ }
+ return true
}
+func (p *TSnapshotRequest) Field8DeepEqual(src *bool) bool {
-func (p *TSnapshotRequest) ReadField6(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return err
+ if p.AllowIncrementalClone == src {
+ return true
+ } else if p.AllowIncrementalClone == nil || src == nil {
+ return false
}
- p.MissingVersion = make([]types.TVersion, 0, size)
- for i := 0; i < size; i++ {
- var _elem types.TVersion
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- _elem = v
- }
+ if *p.AllowIncrementalClone != *src {
+ return false
+ }
+ return true
+}
+func (p *TSnapshotRequest) Field9DeepEqual(src int32) bool {
- p.MissingVersion = append(p.MissingVersion, _elem)
+ if p.PreferredSnapshotVersion != src {
+ return false
}
- if err := iprot.ReadListEnd(); err != nil {
- return err
+ return true
+}
+func (p *TSnapshotRequest) Field10DeepEqual(src *bool) bool {
+
+ if p.IsCopyTabletTask == src {
+ return true
+ } else if p.IsCopyTabletTask == nil || src == nil {
+ return false
}
- return nil
+ if *p.IsCopyTabletTask != *src {
+ return false
+ }
+ return true
+}
+func (p *TSnapshotRequest) Field11DeepEqual(src *types.TVersion) bool {
+
+ if p.StartVersion == src {
+ return true
+ } else if p.StartVersion == nil || src == nil {
+ return false
+ }
+ if *p.StartVersion != *src {
+ return false
+ }
+ return true
+}
+func (p *TSnapshotRequest) Field12DeepEqual(src *types.TVersion) bool {
+
+ if p.EndVersion == src {
+ return true
+ } else if p.EndVersion == nil || src == nil {
+ return false
+ }
+ if *p.EndVersion != *src {
+ return false
+ }
+ return true
+}
+func (p *TSnapshotRequest) Field13DeepEqual(src *bool) bool {
+
+ if p.IsCopyBinlog == src {
+ return true
+ } else if p.IsCopyBinlog == nil || src == nil {
+ return false
+ }
+ if *p.IsCopyBinlog != *src {
+ return false
+ }
+ return true
+}
+func (p *TSnapshotRequest) Field14DeepEqual(src *types.TTabletId) bool {
+
+ if p.RefTabletId == src {
+ return true
+ } else if p.RefTabletId == nil || src == nil {
+ return false
+ }
+ if *p.RefTabletId != *src {
+ return false
+ }
+ return true
+}
+
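Each `FieldNDeepEqual` helper for an optional (pointer-typed) field follows the same nil-safe shape: equal pointers (including both nil) compare as equal, exactly one nil compares as unequal, otherwise the pointed-to values are compared. A compact generic sketch of that logic (`ptrEqual` is illustrative, not part of the generated code):

```go
// Sketch of the nil-safe comparison the generated DeepEqual helpers use for
// optional fields; ptrEqual is a hypothetical generic helper.
package main

import "fmt"

func ptrEqual[T comparable](a, b *T) bool {
	if a == b { // both nil, or the same pointer
		return true
	}
	if a == nil || b == nil { // exactly one side unset
		return false
	}
	return *a == *b
}

func main() {
	x, y := int64(5), int64(5)
	var unset *int64
	fmt.Println(ptrEqual(&x, &y))     // true
	fmt.Println(ptrEqual(&x, unset))  // false
	fmt.Println(ptrEqual(unset, nil)) // true
}
```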
+type TReleaseSnapshotRequest struct {
+ SnapshotPath string `thrift:"snapshot_path,1,required" frugal:"1,required,string" json:"snapshot_path"`
+}
+
+func NewTReleaseSnapshotRequest() *TReleaseSnapshotRequest {
+ return &TReleaseSnapshotRequest{}
}
-func (p *TSnapshotRequest) ReadField7(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(); err != nil {
- return err
- } else {
- p.ListFiles = &v
- }
- return nil
+func (p *TReleaseSnapshotRequest) InitDefault() {
}
-func (p *TSnapshotRequest) ReadField8(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(); err != nil {
- return err
- } else {
- p.AllowIncrementalClone = &v
- }
- return nil
+func (p *TReleaseSnapshotRequest) GetSnapshotPath() (v string) {
+ return p.SnapshotPath
+}
+func (p *TReleaseSnapshotRequest) SetSnapshotPath(val string) {
+ p.SnapshotPath = val
}
-func (p *TSnapshotRequest) ReadField9(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(); err != nil {
- return err
- } else {
- p.PreferredSnapshotVersion = v
- }
- return nil
+var fieldIDToName_TReleaseSnapshotRequest = map[int16]string{
+ 1: "snapshot_path",
}
-func (p *TSnapshotRequest) ReadField10(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(); err != nil {
- return err
- } else {
- p.IsCopyTabletTask = &v
+func (p *TReleaseSnapshotRequest) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetSnapshotPath bool = false
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
}
- return nil
-}
-func (p *TSnapshotRequest) ReadField11(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- p.StartVersion = &v
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetSnapshotPath = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
}
- return nil
-}
-func (p *TSnapshotRequest) ReadField12(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- p.EndVersion = &v
+ if !issetSnapshotPath {
+ fieldId = 1
+ goto RequiredFieldNotSetError
}
return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReleaseSnapshotRequest[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TReleaseSnapshotRequest[fieldId]))
}
-func (p *TSnapshotRequest) ReadField13(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(); err != nil {
+func (p *TReleaseSnapshotRequest) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field string
+ if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.IsCopyBinlog = &v
+ _field = v
}
+ p.SnapshotPath = _field
return nil
}
-func (p *TSnapshotRequest) Write(oprot thrift.TProtocol) (err error) {
+func (p *TReleaseSnapshotRequest) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("TSnapshotRequest"); err != nil {
+ if err = oprot.WriteStructBegin("TReleaseSnapshotRequest"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -16454,55 +18978,6 @@ func (p *TSnapshotRequest) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
- if err = p.writeField2(oprot); err != nil {
- fieldId = 2
- goto WriteFieldError
- }
- if err = p.writeField3(oprot); err != nil {
- fieldId = 3
- goto WriteFieldError
- }
- if err = p.writeField4(oprot); err != nil {
- fieldId = 4
- goto WriteFieldError
- }
- if err = p.writeField5(oprot); err != nil {
- fieldId = 5
- goto WriteFieldError
- }
- if err = p.writeField6(oprot); err != nil {
- fieldId = 6
- goto WriteFieldError
- }
- if err = p.writeField7(oprot); err != nil {
- fieldId = 7
- goto WriteFieldError
- }
- if err = p.writeField8(oprot); err != nil {
- fieldId = 8
- goto WriteFieldError
- }
- if err = p.writeField9(oprot); err != nil {
- fieldId = 9
- goto WriteFieldError
- }
- if err = p.writeField10(oprot); err != nil {
- fieldId = 10
- goto WriteFieldError
- }
- if err = p.writeField11(oprot); err != nil {
- fieldId = 11
- goto WriteFieldError
- }
- if err = p.writeField12(oprot); err != nil {
- fieldId = 12
- goto WriteFieldError
- }
- if err = p.writeField13(oprot); err != nil {
- fieldId = 13
- goto WriteFieldError
- }
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -16521,11 +18996,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *TSnapshotRequest) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("tablet_id", thrift.I64, 1); err != nil {
+func (p *TReleaseSnapshotRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("snapshot_path", thrift.STRING, 1); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteI64(p.TabletId); err != nil {
+ if err := oprot.WriteString(p.SnapshotPath); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -16538,466 +19013,676 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *TSnapshotRequest) writeField2(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("schema_hash", thrift.I32, 2); err != nil {
- goto WriteFieldBeginError
+func (p *TReleaseSnapshotRequest) String() string {
+ if p == nil {
+ return ""
}
- if err := oprot.WriteI32(p.SchemaHash); err != nil {
- return err
+ return fmt.Sprintf("TReleaseSnapshotRequest(%+v)", *p)
+
+}
+
+func (p *TReleaseSnapshotRequest) DeepEqual(ano *TReleaseSnapshotRequest) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
}
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
+ if !p.Field1DeepEqual(ano.SnapshotPath) {
+ return false
}
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+ return true
}
-func (p *TSnapshotRequest) writeField3(oprot thrift.TProtocol) (err error) {
- if p.IsSetVersion() {
- if err = oprot.WriteFieldBegin("version", thrift.I64, 3); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(*p.Version); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
+func (p *TReleaseSnapshotRequest) Field1DeepEqual(src string) bool {
+
+ if strings.Compare(p.SnapshotPath, src) != 0 {
+ return false
}
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+ return true
}
-func (p *TSnapshotRequest) writeField4(oprot thrift.TProtocol) (err error) {
- if p.IsSetVersionHash() {
- if err = oprot.WriteFieldBegin("version_hash", thrift.I64, 4); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(*p.VersionHash); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
- }
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+type TClearRemoteFileReq struct {
+ RemoteFilePath string `thrift:"remote_file_path,1,required" frugal:"1,required,string" json:"remote_file_path"`
+ RemoteSourceProperties map[string]string `thrift:"remote_source_properties,2,required" frugal:"2,required,map" json:"remote_source_properties"`
+}
+
+func NewTClearRemoteFileReq() *TClearRemoteFileReq {
+ return &TClearRemoteFileReq{}
+}
+
+func (p *TClearRemoteFileReq) InitDefault() {
+}
+
+func (p *TClearRemoteFileReq) GetRemoteFilePath() (v string) {
+ return p.RemoteFilePath
+}
+
+func (p *TClearRemoteFileReq) GetRemoteSourceProperties() (v map[string]string) {
+ return p.RemoteSourceProperties
+}
+func (p *TClearRemoteFileReq) SetRemoteFilePath(val string) {
+ p.RemoteFilePath = val
+}
+func (p *TClearRemoteFileReq) SetRemoteSourceProperties(val map[string]string) {
+ p.RemoteSourceProperties = val
+}
+
+var fieldIDToName_TClearRemoteFileReq = map[int16]string{
+ 1: "remote_file_path",
+ 2: "remote_source_properties",
}
-func (p *TSnapshotRequest) writeField5(oprot thrift.TProtocol) (err error) {
- if p.IsSetTimeout() {
- if err = oprot.WriteFieldBegin("timeout", thrift.I64, 5); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(*p.Timeout); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
+func (p *TClearRemoteFileReq) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetRemoteFilePath bool = false
+ var issetRemoteSourceProperties bool = false
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
}
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err)
-}
-func (p *TSnapshotRequest) writeField6(oprot thrift.TProtocol) (err error) {
- if p.IsSetMissingVersion() {
- if err = oprot.WriteFieldBegin("missing_version", thrift.LIST, 6); err != nil {
- goto WriteFieldBeginError
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
}
- if err := oprot.WriteListBegin(thrift.I64, len(p.MissingVersion)); err != nil {
- return err
+ if fieldTypeId == thrift.STOP {
+ break
}
- for _, v := range p.MissingVersion {
- if err := oprot.WriteI64(v); err != nil {
- return err
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetRemoteFilePath = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.MAP {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetRemoteSourceProperties = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
}
- if err := oprot.WriteListEnd(); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
}
}
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err)
-}
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
-func (p *TSnapshotRequest) writeField7(oprot thrift.TProtocol) (err error) {
- if p.IsSetListFiles() {
- if err = oprot.WriteFieldBegin("list_files", thrift.BOOL, 7); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteBool(*p.ListFiles); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
+ if !issetRemoteFilePath {
+ fieldId = 1
+ goto RequiredFieldNotSetError
}
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err)
-}
-func (p *TSnapshotRequest) writeField8(oprot thrift.TProtocol) (err error) {
- if p.IsSetAllowIncrementalClone() {
- if err = oprot.WriteFieldBegin("allow_incremental_clone", thrift.BOOL, 8); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteBool(*p.AllowIncrementalClone); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
+ if !issetRemoteSourceProperties {
+ fieldId = 2
+ goto RequiredFieldNotSetError
}
return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err)
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TClearRemoteFileReq[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TClearRemoteFileReq[fieldId]))
}
-func (p *TSnapshotRequest) writeField9(oprot thrift.TProtocol) (err error) {
- if p.IsSetPreferredSnapshotVersion() {
- if err = oprot.WriteFieldBegin("preferred_snapshot_version", thrift.I32, 9); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI32(p.PreferredSnapshotVersion); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
+func (p *TClearRemoteFileReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = v
}
+ p.RemoteFilePath = _field
return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err)
}
-
-func (p *TSnapshotRequest) writeField10(oprot thrift.TProtocol) (err error) {
- if p.IsSetIsCopyTabletTask() {
- if err = oprot.WriteFieldBegin("is_copy_tablet_task", thrift.BOOL, 10); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteBool(*p.IsCopyTabletTask); err != nil {
+func (p *TClearRemoteFileReq) ReadField2(iprot thrift.TProtocol) error {
+ _, _, size, err := iprot.ReadMapBegin()
+ if err != nil {
+ return err
+ }
+ _field := make(map[string]string, size)
+ for i := 0; i < size; i++ {
+ var _key string
+ if v, err := iprot.ReadString(); err != nil {
return err
+ } else {
+ _key = v
}
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
+
+ var _val string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _val = v
}
+
+ _field[_key] = _val
+ }
+ if err := iprot.ReadMapEnd(); err != nil {
+ return err
}
+ p.RemoteSourceProperties = _field
return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err)
}
-func (p *TSnapshotRequest) writeField11(oprot thrift.TProtocol) (err error) {
- if p.IsSetStartVersion() {
- if err = oprot.WriteFieldBegin("start_version", thrift.I64, 11); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(*p.StartVersion); err != nil {
- return err
+func (p *TClearRemoteFileReq) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TClearRemoteFileReq"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
}
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
}
}
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err)
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *TSnapshotRequest) writeField12(oprot thrift.TProtocol) (err error) {
- if p.IsSetEndVersion() {
- if err = oprot.WriteFieldBegin("end_version", thrift.I64, 12); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(*p.EndVersion); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
+func (p *TClearRemoteFileReq) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("remote_file_path", thrift.STRING, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(p.RemoteFilePath); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
}
return nil
WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err)
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err)
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *TSnapshotRequest) writeField13(oprot thrift.TProtocol) (err error) {
- if p.IsSetIsCopyBinlog() {
- if err = oprot.WriteFieldBegin("is_copy_binlog", thrift.BOOL, 13); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteBool(*p.IsCopyBinlog); err != nil {
+func (p *TClearRemoteFileReq) writeField2(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("remote_source_properties", thrift.MAP, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.RemoteSourceProperties)); err != nil {
+ return err
+ }
+ for k, v := range p.RemoteSourceProperties {
+ if err := oprot.WriteString(k); err != nil {
return err
}
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
+ if err := oprot.WriteString(v); err != nil {
+ return err
}
}
+ if err := oprot.WriteMapEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
return nil
WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err)
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err)
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
}
-func (p *TSnapshotRequest) String() string {
+func (p *TClearRemoteFileReq) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("TSnapshotRequest(%+v)", *p)
+ return fmt.Sprintf("TClearRemoteFileReq(%+v)", *p)
+
}
-func (p *TSnapshotRequest) DeepEqual(ano *TSnapshotRequest) bool {
+func (p *TClearRemoteFileReq) DeepEqual(ano *TClearRemoteFileReq) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.TabletId) {
- return false
- }
- if !p.Field2DeepEqual(ano.SchemaHash) {
+ if !p.Field1DeepEqual(ano.RemoteFilePath) {
return false
}
- if !p.Field3DeepEqual(ano.Version) {
+ if !p.Field2DeepEqual(ano.RemoteSourceProperties) {
return false
}
- if !p.Field4DeepEqual(ano.VersionHash) {
+ return true
+}
+
+func (p *TClearRemoteFileReq) Field1DeepEqual(src string) bool {
+
+ if strings.Compare(p.RemoteFilePath, src) != 0 {
return false
}
- if !p.Field5DeepEqual(ano.Timeout) {
+ return true
+}
+func (p *TClearRemoteFileReq) Field2DeepEqual(src map[string]string) bool {
+
+ if len(p.RemoteSourceProperties) != len(src) {
return false
}
- if !p.Field6DeepEqual(ano.MissingVersion) {
- return false
+ for k, v := range p.RemoteSourceProperties {
+ _src := src[k]
+ if strings.Compare(v, _src) != 0 {
+ return false
+ }
}
- if !p.Field7DeepEqual(ano.ListFiles) {
- return false
+ return true
+}
+
+type TPartitionVersionInfo struct {
+ PartitionId types.TPartitionId `thrift:"partition_id,1,required" frugal:"1,required,i64" json:"partition_id"`
+ Version types.TVersion `thrift:"version,2,required" frugal:"2,required,i64" json:"version"`
+ VersionHash types.TVersionHash `thrift:"version_hash,3,required" frugal:"3,required,i64" json:"version_hash"`
+}
+
+func NewTPartitionVersionInfo() *TPartitionVersionInfo {
+ return &TPartitionVersionInfo{}
+}
+
+func (p *TPartitionVersionInfo) InitDefault() {
+}
+
+func (p *TPartitionVersionInfo) GetPartitionId() (v types.TPartitionId) {
+ return p.PartitionId
+}
+
+func (p *TPartitionVersionInfo) GetVersion() (v types.TVersion) {
+ return p.Version
+}
+
+func (p *TPartitionVersionInfo) GetVersionHash() (v types.TVersionHash) {
+ return p.VersionHash
+}
+func (p *TPartitionVersionInfo) SetPartitionId(val types.TPartitionId) {
+ p.PartitionId = val
+}
+func (p *TPartitionVersionInfo) SetVersion(val types.TVersion) {
+ p.Version = val
+}
+func (p *TPartitionVersionInfo) SetVersionHash(val types.TVersionHash) {
+ p.VersionHash = val
+}
+
+var fieldIDToName_TPartitionVersionInfo = map[int16]string{
+ 1: "partition_id",
+ 2: "version",
+ 3: "version_hash",
+}
+
+func (p *TPartitionVersionInfo) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetPartitionId bool = false
+ var issetVersion bool = false
+ var issetVersionHash bool = false
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
}
- if !p.Field8DeepEqual(ano.AllowIncrementalClone) {
- return false
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetPartitionId = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetVersion = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetVersionHash = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
}
- if !p.Field9DeepEqual(ano.PreferredSnapshotVersion) {
- return false
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
}
- if !p.Field10DeepEqual(ano.IsCopyTabletTask) {
- return false
+
+ if !issetPartitionId {
+ fieldId = 1
+ goto RequiredFieldNotSetError
}
- if !p.Field11DeepEqual(ano.StartVersion) {
- return false
+
+ if !issetVersion {
+ fieldId = 2
+ goto RequiredFieldNotSetError
}
- if !p.Field12DeepEqual(ano.EndVersion) {
- return false
+
+ if !issetVersionHash {
+ fieldId = 3
+ goto RequiredFieldNotSetError
}
- if !p.Field13DeepEqual(ano.IsCopyBinlog) {
- return false
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPartitionVersionInfo[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPartitionVersionInfo[fieldId]))
+}
+
+func (p *TPartitionVersionInfo) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TPartitionId
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
}
- return true
+ p.PartitionId = _field
+ return nil
}
+func (p *TPartitionVersionInfo) ReadField2(iprot thrift.TProtocol) error {
-func (p *TSnapshotRequest) Field1DeepEqual(src types.TTabletId) bool {
+ var _field types.TVersion
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.Version = _field
+ return nil
+}
+func (p *TPartitionVersionInfo) ReadField3(iprot thrift.TProtocol) error {
- if p.TabletId != src {
- return false
+ var _field types.TVersionHash
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
}
- return true
+ p.VersionHash = _field
+ return nil
}
-func (p *TSnapshotRequest) Field2DeepEqual(src types.TSchemaHash) bool {
- if p.SchemaHash != src {
- return false
+func (p *TPartitionVersionInfo) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TPartitionVersionInfo"); err != nil {
+ goto WriteStructBeginError
}
- return true
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *TSnapshotRequest) Field3DeepEqual(src *types.TVersion) bool {
- if p.Version == src {
- return true
- } else if p.Version == nil || src == nil {
- return false
+func (p *TPartitionVersionInfo) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 1); err != nil {
+ goto WriteFieldBeginError
}
- if *p.Version != *src {
- return false
+ if err := oprot.WriteI64(p.PartitionId); err != nil {
+ return err
}
- return true
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *TSnapshotRequest) Field4DeepEqual(src *types.TVersionHash) bool {
- if p.VersionHash == src {
- return true
- } else if p.VersionHash == nil || src == nil {
- return false
+func (p *TPartitionVersionInfo) writeField2(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("version", thrift.I64, 2); err != nil {
+ goto WriteFieldBeginError
}
- if *p.VersionHash != *src {
- return false
+ if err := oprot.WriteI64(p.Version); err != nil {
+ return err
}
- return true
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
}
-func (p *TSnapshotRequest) Field5DeepEqual(src *int64) bool {
- if p.Timeout == src {
- return true
- } else if p.Timeout == nil || src == nil {
- return false
+func (p *TPartitionVersionInfo) writeField3(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("version_hash", thrift.I64, 3); err != nil {
+ goto WriteFieldBeginError
}
- if *p.Timeout != *src {
- return false
+ if err := oprot.WriteI64(p.VersionHash); err != nil {
+ return err
}
- return true
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
}
-func (p *TSnapshotRequest) Field6DeepEqual(src []types.TVersion) bool {
- if len(p.MissingVersion) != len(src) {
- return false
- }
- for i, v := range p.MissingVersion {
- _src := src[i]
- if v != _src {
- return false
- }
+func (p *TPartitionVersionInfo) String() string {
+ if p == nil {
+ return ""
}
- return true
+ return fmt.Sprintf("TPartitionVersionInfo(%+v)", *p)
+
}
-func (p *TSnapshotRequest) Field7DeepEqual(src *bool) bool {
- if p.ListFiles == src {
+func (p *TPartitionVersionInfo) DeepEqual(ano *TPartitionVersionInfo) bool {
+ if p == ano {
return true
- } else if p.ListFiles == nil || src == nil {
+ } else if p == nil || ano == nil {
return false
}
- if *p.ListFiles != *src {
+ if !p.Field1DeepEqual(ano.PartitionId) {
return false
}
- return true
-}
-func (p *TSnapshotRequest) Field8DeepEqual(src *bool) bool {
-
- if p.AllowIncrementalClone == src {
- return true
- } else if p.AllowIncrementalClone == nil || src == nil {
+ if !p.Field2DeepEqual(ano.Version) {
return false
}
- if *p.AllowIncrementalClone != *src {
+ if !p.Field3DeepEqual(ano.VersionHash) {
return false
}
return true
}
-func (p *TSnapshotRequest) Field9DeepEqual(src int32) bool {
- if p.PreferredSnapshotVersion != src {
+func (p *TPartitionVersionInfo) Field1DeepEqual(src types.TPartitionId) bool {
+
+ if p.PartitionId != src {
return false
}
return true
}
-func (p *TSnapshotRequest) Field10DeepEqual(src *bool) bool {
+func (p *TPartitionVersionInfo) Field2DeepEqual(src types.TVersion) bool {
- if p.IsCopyTabletTask == src {
- return true
- } else if p.IsCopyTabletTask == nil || src == nil {
- return false
- }
- if *p.IsCopyTabletTask != *src {
+ if p.Version != src {
return false
}
return true
}
-func (p *TSnapshotRequest) Field11DeepEqual(src *types.TVersion) bool {
+func (p *TPartitionVersionInfo) Field3DeepEqual(src types.TVersionHash) bool {
- if p.StartVersion == src {
- return true
- } else if p.StartVersion == nil || src == nil {
- return false
- }
- if *p.StartVersion != *src {
+ if p.VersionHash != src {
return false
}
return true
}
-func (p *TSnapshotRequest) Field12DeepEqual(src *types.TVersion) bool {
- if p.EndVersion == src {
- return true
- } else if p.EndVersion == nil || src == nil {
- return false
- }
- if *p.EndVersion != *src {
- return false
- }
- return true
+type TMoveDirReq struct {
+ TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"`
+ SchemaHash types.TSchemaHash `thrift:"schema_hash,2,required" frugal:"2,required,i32" json:"schema_hash"`
+ Src string `thrift:"src,3,required" frugal:"3,required,string" json:"src"`
+ JobId int64 `thrift:"job_id,4,required" frugal:"4,required,i64" json:"job_id"`
+ Overwrite bool `thrift:"overwrite,5,required" frugal:"5,required,bool" json:"overwrite"`
}
-func (p *TSnapshotRequest) Field13DeepEqual(src *bool) bool {
- if p.IsCopyBinlog == src {
- return true
- } else if p.IsCopyBinlog == nil || src == nil {
- return false
- }
- if *p.IsCopyBinlog != *src {
- return false
- }
- return true
+func NewTMoveDirReq() *TMoveDirReq {
+ return &TMoveDirReq{}
}
-type TReleaseSnapshotRequest struct {
- SnapshotPath string `thrift:"snapshot_path,1,required" frugal:"1,required,string" json:"snapshot_path"`
+func (p *TMoveDirReq) InitDefault() {
}
-func NewTReleaseSnapshotRequest() *TReleaseSnapshotRequest {
- return &TReleaseSnapshotRequest{}
+func (p *TMoveDirReq) GetTabletId() (v types.TTabletId) {
+ return p.TabletId
}
-func (p *TReleaseSnapshotRequest) InitDefault() {
- *p = TReleaseSnapshotRequest{}
+func (p *TMoveDirReq) GetSchemaHash() (v types.TSchemaHash) {
+ return p.SchemaHash
}
-func (p *TReleaseSnapshotRequest) GetSnapshotPath() (v string) {
- return p.SnapshotPath
+func (p *TMoveDirReq) GetSrc() (v string) {
+ return p.Src
}
-func (p *TReleaseSnapshotRequest) SetSnapshotPath(val string) {
- p.SnapshotPath = val
+
+func (p *TMoveDirReq) GetJobId() (v int64) {
+ return p.JobId
}
-var fieldIDToName_TReleaseSnapshotRequest = map[int16]string{
- 1: "snapshot_path",
+func (p *TMoveDirReq) GetOverwrite() (v bool) {
+ return p.Overwrite
+}
+func (p *TMoveDirReq) SetTabletId(val types.TTabletId) {
+ p.TabletId = val
+}
+func (p *TMoveDirReq) SetSchemaHash(val types.TSchemaHash) {
+ p.SchemaHash = val
+}
+func (p *TMoveDirReq) SetSrc(val string) {
+ p.Src = val
+}
+func (p *TMoveDirReq) SetJobId(val int64) {
+ p.JobId = val
+}
+func (p *TMoveDirReq) SetOverwrite(val bool) {
+ p.Overwrite = val
}
-func (p *TReleaseSnapshotRequest) Read(iprot thrift.TProtocol) (err error) {
+var fieldIDToName_TMoveDirReq = map[int16]string{
+ 1: "tablet_id",
+ 2: "schema_hash",
+ 3: "src",
+ 4: "job_id",
+ 5: "overwrite",
+}
+
+func (p *TMoveDirReq) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
- var issetSnapshotPath bool = false
+ var issetTabletId bool = false
+ var issetSchemaHash bool = false
+ var issetSrc bool = false
+ var issetJobId bool = false
+ var issetOverwrite bool = false
if _, err = iprot.ReadStructBegin(); err != nil {
goto ReadStructBeginError
@@ -17014,22 +19699,55 @@ func (p *TReleaseSnapshotRequest) Read(iprot thrift.TProtocol) (err error) {
switch fieldId {
case 1:
- if fieldTypeId == thrift.STRING {
+ if fieldTypeId == thrift.I64 {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- issetSnapshotPath = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ issetTabletId = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetSchemaHash = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetSrc = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetJobId = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 5:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField5(iprot); err != nil {
+ goto ReadFieldError
}
+ issetOverwrite = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -17038,8 +19756,28 @@ func (p *TReleaseSnapshotRequest) Read(iprot thrift.TProtocol) (err error) {
goto ReadStructEndError
}
- if !issetSnapshotPath {
- fieldId = 1
+ if !issetTabletId {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetSchemaHash {
+ fieldId = 2
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetSrc {
+ fieldId = 3
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetJobId {
+ fieldId = 4
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetOverwrite {
+ fieldId = 5
goto RequiredFieldNotSetError
}
return nil
@@ -17048,7 +19786,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TReleaseSnapshotRequest[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMoveDirReq[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -17057,21 +19795,68 @@ ReadFieldEndError:
ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
RequiredFieldNotSetError:
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TReleaseSnapshotRequest[fieldId]))
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMoveDirReq[fieldId]))
}
-func (p *TReleaseSnapshotRequest) ReadField1(iprot thrift.TProtocol) error {
+func (p *TMoveDirReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.TabletId = _field
+ return nil
+}
+func (p *TMoveDirReq) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field types.TSchemaHash
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.SchemaHash = _field
+ return nil
+}
+func (p *TMoveDirReq) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.SnapshotPath = v
+ _field = v
}
+ p.Src = _field
return nil
}
+func (p *TMoveDirReq) ReadField4(iprot thrift.TProtocol) error {
-func (p *TReleaseSnapshotRequest) Write(oprot thrift.TProtocol) (err error) {
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.JobId = _field
+ return nil
+}
+func (p *TMoveDirReq) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.Overwrite = _field
+ return nil
+}
+
+func (p *TMoveDirReq) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("TReleaseSnapshotRequest"); err != nil {
+ if err = oprot.WriteStructBegin("TMoveDirReq"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -17079,7 +19864,22 @@ func (p *TReleaseSnapshotRequest) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
-
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ if err = p.writeField5(oprot); err != nil {
+ fieldId = 5
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -17098,11 +19898,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *TReleaseSnapshotRequest) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("snapshot_path", thrift.STRING, 1); err != nil {
+func (p *TMoveDirReq) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("tablet_id", thrift.I64, 1); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteString(p.SnapshotPath); err != nil {
+ if err := oprot.WriteI64(p.TabletId); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -17115,71 +19915,219 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *TReleaseSnapshotRequest) String() string {
+func (p *TMoveDirReq) writeField2(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("schema_hash", thrift.I32, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(p.SchemaHash); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TMoveDirReq) writeField3(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("src", thrift.STRING, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(p.Src); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *TMoveDirReq) writeField4(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("job_id", thrift.I64, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.JobId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *TMoveDirReq) writeField5(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("overwrite", thrift.BOOL, 5); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(p.Overwrite); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err)
+}
+
+func (p *TMoveDirReq) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("TReleaseSnapshotRequest(%+v)", *p)
+ return fmt.Sprintf("TMoveDirReq(%+v)", *p)
+
}
-func (p *TReleaseSnapshotRequest) DeepEqual(ano *TReleaseSnapshotRequest) bool {
+func (p *TMoveDirReq) DeepEqual(ano *TMoveDirReq) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.SnapshotPath) {
+ if !p.Field1DeepEqual(ano.TabletId) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.SchemaHash) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.Src) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.JobId) {
+ return false
+ }
+ if !p.Field5DeepEqual(ano.Overwrite) {
return false
}
return true
}
-func (p *TReleaseSnapshotRequest) Field1DeepEqual(src string) bool {
+func (p *TMoveDirReq) Field1DeepEqual(src types.TTabletId) bool {
- if strings.Compare(p.SnapshotPath, src) != 0 {
+ if p.TabletId != src {
return false
}
return true
}
+func (p *TMoveDirReq) Field2DeepEqual(src types.TSchemaHash) bool {
-type TClearRemoteFileReq struct {
- RemoteFilePath string `thrift:"remote_file_path,1,required" frugal:"1,required,string" json:"remote_file_path"`
- RemoteSourceProperties map[string]string `thrift:"remote_source_properties,2,required" frugal:"2,required,map" json:"remote_source_properties"`
+ if p.SchemaHash != src {
+ return false
+ }
+ return true
}
+func (p *TMoveDirReq) Field3DeepEqual(src string) bool {
-func NewTClearRemoteFileReq() *TClearRemoteFileReq {
- return &TClearRemoteFileReq{}
+ if strings.Compare(p.Src, src) != 0 {
+ return false
+ }
+ return true
}
+func (p *TMoveDirReq) Field4DeepEqual(src int64) bool {
-func (p *TClearRemoteFileReq) InitDefault() {
- *p = TClearRemoteFileReq{}
+ if p.JobId != src {
+ return false
+ }
+ return true
}
+func (p *TMoveDirReq) Field5DeepEqual(src bool) bool {
-func (p *TClearRemoteFileReq) GetRemoteFilePath() (v string) {
- return p.RemoteFilePath
+ if p.Overwrite != src {
+ return false
+ }
+ return true
}
-func (p *TClearRemoteFileReq) GetRemoteSourceProperties() (v map[string]string) {
- return p.RemoteSourceProperties
+type TPublishVersionRequest struct {
+ TransactionId types.TTransactionId `thrift:"transaction_id,1,required" frugal:"1,required,i64" json:"transaction_id"`
+ PartitionVersionInfos []*TPartitionVersionInfo `thrift:"partition_version_infos,2,required" frugal:"2,required,list" json:"partition_version_infos"`
+ StrictMode bool `thrift:"strict_mode,3,optional" frugal:"3,optional,bool" json:"strict_mode,omitempty"`
+ BaseTabletIds []types.TTabletId `thrift:"base_tablet_ids,4,optional" frugal:"4,optional,set" json:"base_tablet_ids,omitempty"`
}
-func (p *TClearRemoteFileReq) SetRemoteFilePath(val string) {
- p.RemoteFilePath = val
+
+func NewTPublishVersionRequest() *TPublishVersionRequest {
+ return &TPublishVersionRequest{
+
+ StrictMode: false,
+ }
}
-func (p *TClearRemoteFileReq) SetRemoteSourceProperties(val map[string]string) {
- p.RemoteSourceProperties = val
+
+func (p *TPublishVersionRequest) InitDefault() {
+ p.StrictMode = false
}
-var fieldIDToName_TClearRemoteFileReq = map[int16]string{
- 1: "remote_file_path",
- 2: "remote_source_properties",
+func (p *TPublishVersionRequest) GetTransactionId() (v types.TTransactionId) {
+ return p.TransactionId
+}
+
+func (p *TPublishVersionRequest) GetPartitionVersionInfos() (v []*TPartitionVersionInfo) {
+ return p.PartitionVersionInfos
+}
+
+var TPublishVersionRequest_StrictMode_DEFAULT bool = false
+
+func (p *TPublishVersionRequest) GetStrictMode() (v bool) {
+ if !p.IsSetStrictMode() {
+ return TPublishVersionRequest_StrictMode_DEFAULT
+ }
+ return p.StrictMode
+}
+
+var TPublishVersionRequest_BaseTabletIds_DEFAULT []types.TTabletId
+
+func (p *TPublishVersionRequest) GetBaseTabletIds() (v []types.TTabletId) {
+ if !p.IsSetBaseTabletIds() {
+ return TPublishVersionRequest_BaseTabletIds_DEFAULT
+ }
+ return p.BaseTabletIds
+}
+func (p *TPublishVersionRequest) SetTransactionId(val types.TTransactionId) {
+ p.TransactionId = val
+}
+func (p *TPublishVersionRequest) SetPartitionVersionInfos(val []*TPartitionVersionInfo) {
+ p.PartitionVersionInfos = val
+}
+func (p *TPublishVersionRequest) SetStrictMode(val bool) {
+ p.StrictMode = val
+}
+func (p *TPublishVersionRequest) SetBaseTabletIds(val []types.TTabletId) {
+ p.BaseTabletIds = val
+}
+
+var fieldIDToName_TPublishVersionRequest = map[int16]string{
+ 1: "transaction_id",
+ 2: "partition_version_infos",
+ 3: "strict_mode",
+ 4: "base_tablet_ids",
+}
+
+func (p *TPublishVersionRequest) IsSetStrictMode() bool {
+ return p.StrictMode != TPublishVersionRequest_StrictMode_DEFAULT
+}
+
+func (p *TPublishVersionRequest) IsSetBaseTabletIds() bool {
+ return p.BaseTabletIds != nil
}
-func (p *TClearRemoteFileReq) Read(iprot thrift.TProtocol) (err error) {
+func (p *TPublishVersionRequest) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
- var issetRemoteFilePath bool = false
- var issetRemoteSourceProperties bool = false
+ var issetTransactionId bool = false
+ var issetPartitionVersionInfos bool = false
if _, err = iprot.ReadStructBegin(); err != nil {
goto ReadStructBeginError
@@ -17196,33 +20144,44 @@ func (p *TClearRemoteFileReq) Read(iprot thrift.TProtocol) (err error) {
switch fieldId {
case 1:
- if fieldTypeId == thrift.STRING {
+ if fieldTypeId == thrift.I64 {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- issetRemoteFilePath = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ issetTransactionId = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
- if fieldTypeId == thrift.MAP {
+ if fieldTypeId == thrift.LIST {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- issetRemoteSourceProperties = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ issetPartitionVersionInfos = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.SET {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -17231,12 +20190,12 @@ func (p *TClearRemoteFileReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadStructEndError
}
- if !issetRemoteFilePath {
+ if !issetTransactionId {
fieldId = 1
goto RequiredFieldNotSetError
}
- if !issetRemoteSourceProperties {
+ if !issetPartitionVersionInfos {
fieldId = 2
goto RequiredFieldNotSetError
}
@@ -17246,7 +20205,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TClearRemoteFileReq[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishVersionRequest[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -17255,50 +20214,81 @@ ReadFieldEndError:
ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
RequiredFieldNotSetError:
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TClearRemoteFileReq[fieldId]))
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishVersionRequest[fieldId]))
}
-func (p *TClearRemoteFileReq) ReadField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
+func (p *TPublishVersionRequest) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTransactionId
+ if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.RemoteFilePath = v
+ _field = v
}
+ p.TransactionId = _field
return nil
}
-
-func (p *TClearRemoteFileReq) ReadField2(iprot thrift.TProtocol) error {
- _, _, size, err := iprot.ReadMapBegin()
+func (p *TPublishVersionRequest) ReadField2(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.RemoteSourceProperties = make(map[string]string, size)
+ _field := make([]*TPartitionVersionInfo, 0, size)
+ values := make([]TPartitionVersionInfo, size)
for i := 0; i < size; i++ {
- var _key string
- if v, err := iprot.ReadString(); err != nil {
+ _elem := &values[i]
+ _elem.InitDefault()
+
+ if err := _elem.Read(iprot); err != nil {
return err
- } else {
- _key = v
}
- var _val string
- if v, err := iprot.ReadString(); err != nil {
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.PartitionVersionInfos = _field
+ return nil
+}
+func (p *TPublishVersionRequest) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.StrictMode = _field
+ return nil
+}
+func (p *TPublishVersionRequest) ReadField4(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadSetBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]types.TTabletId, 0, size)
+ for i := 0; i < size; i++ {
+
+ var _elem types.TTabletId
+ if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- _val = v
+ _elem = v
}
- p.RemoteSourceProperties[_key] = _val
+ _field = append(_field, _elem)
}
- if err := iprot.ReadMapEnd(); err != nil {
+ if err := iprot.ReadSetEnd(); err != nil {
return err
}
+ p.BaseTabletIds = _field
return nil
}
-func (p *TClearRemoteFileReq) Write(oprot thrift.TProtocol) (err error) {
+func (p *TPublishVersionRequest) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("TClearRemoteFileReq"); err != nil {
+ if err = oprot.WriteStructBegin("TPublishVersionRequest"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -17310,7 +20300,14 @@ func (p *TClearRemoteFileReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 2
goto WriteFieldError
}
-
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -17329,11 +20326,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *TClearRemoteFileReq) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("remote_file_path", thrift.STRING, 1); err != nil {
+func (p *TPublishVersionRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("transaction_id", thrift.I64, 1); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteString(p.RemoteFilePath); err != nil {
+ if err := oprot.WriteI64(p.TransactionId); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -17346,24 +20343,19 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *TClearRemoteFileReq) writeField2(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("remote_source_properties", thrift.MAP, 2); err != nil {
+func (p *TPublishVersionRequest) writeField2(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("partition_version_infos", thrift.LIST, 2); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteMapBegin(thrift.STRING, thrift.STRING, len(p.RemoteSourceProperties)); err != nil {
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PartitionVersionInfos)); err != nil {
return err
}
- for k, v := range p.RemoteSourceProperties {
-
- if err := oprot.WriteString(k); err != nil {
- return err
- }
-
- if err := oprot.WriteString(v); err != nil {
+ for _, v := range p.PartitionVersionInfos {
+ if err := v.Write(oprot); err != nil {
return err
}
}
- if err := oprot.WriteMapEnd(); err != nil {
+ if err := oprot.WriteListEnd(); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -17376,97 +20368,161 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
}
-func (p *TClearRemoteFileReq) String() string {
+func (p *TPublishVersionRequest) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetStrictMode() {
+ if err = oprot.WriteFieldBegin("strict_mode", thrift.BOOL, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(p.StrictMode); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *TPublishVersionRequest) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetBaseTabletIds() {
+ if err = oprot.WriteFieldBegin("base_tablet_ids", thrift.SET, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteSetBegin(thrift.I64, len(p.BaseTabletIds)); err != nil {
+ return err
+ }
+ for i := 0; i < len(p.BaseTabletIds); i++ {
+ for j := i + 1; j < len(p.BaseTabletIds); j++ {
+ if func(tgt, src types.TTabletId) bool {
+ if tgt != src {
+ return false
+ }
+ return true
+ }(p.BaseTabletIds[i], p.BaseTabletIds[j]) {
+ return thrift.PrependError("", fmt.Errorf("%T error writing set field: slice is not unique", p.BaseTabletIds[i]))
+ }
+ }
+ }
+ for _, v := range p.BaseTabletIds {
+ if err := oprot.WriteI64(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteSetEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *TPublishVersionRequest) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("TClearRemoteFileReq(%+v)", *p)
+ return fmt.Sprintf("TPublishVersionRequest(%+v)", *p)
+
}
-func (p *TClearRemoteFileReq) DeepEqual(ano *TClearRemoteFileReq) bool {
+func (p *TPublishVersionRequest) DeepEqual(ano *TPublishVersionRequest) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.RemoteFilePath) {
+ if !p.Field1DeepEqual(ano.TransactionId) {
return false
}
- if !p.Field2DeepEqual(ano.RemoteSourceProperties) {
+ if !p.Field2DeepEqual(ano.PartitionVersionInfos) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.StrictMode) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.BaseTabletIds) {
return false
}
return true
}
-func (p *TClearRemoteFileReq) Field1DeepEqual(src string) bool {
+func (p *TPublishVersionRequest) Field1DeepEqual(src types.TTransactionId) bool {
- if strings.Compare(p.RemoteFilePath, src) != 0 {
+ if p.TransactionId != src {
return false
}
return true
}
-func (p *TClearRemoteFileReq) Field2DeepEqual(src map[string]string) bool {
+func (p *TPublishVersionRequest) Field2DeepEqual(src []*TPartitionVersionInfo) bool {
- if len(p.RemoteSourceProperties) != len(src) {
+ if len(p.PartitionVersionInfos) != len(src) {
return false
}
- for k, v := range p.RemoteSourceProperties {
- _src := src[k]
- if strings.Compare(v, _src) != 0 {
+ for i, v := range p.PartitionVersionInfos {
+ _src := src[i]
+ if !v.DeepEqual(_src) {
return false
}
}
return true
}
+func (p *TPublishVersionRequest) Field3DeepEqual(src bool) bool {
-type TPartitionVersionInfo struct {
- PartitionId types.TPartitionId `thrift:"partition_id,1,required" frugal:"1,required,i64" json:"partition_id"`
- Version types.TVersion `thrift:"version,2,required" frugal:"2,required,i64" json:"version"`
- VersionHash types.TVersionHash `thrift:"version_hash,3,required" frugal:"3,required,i64" json:"version_hash"`
+ if p.StrictMode != src {
+ return false
+ }
+ return true
}
+func (p *TPublishVersionRequest) Field4DeepEqual(src []types.TTabletId) bool {
-func NewTPartitionVersionInfo() *TPartitionVersionInfo {
- return &TPartitionVersionInfo{}
+ if len(p.BaseTabletIds) != len(src) {
+ return false
+ }
+ for i, v := range p.BaseTabletIds {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
+ }
+ return true
}
-func (p *TPartitionVersionInfo) InitDefault() {
- *p = TPartitionVersionInfo{}
+type TVisibleVersionReq struct {
+ PartitionVersion map[types.TPartitionId]types.TVersion `thrift:"partition_version,1,required" frugal:"1,required,map" json:"partition_version"`
}
-func (p *TPartitionVersionInfo) GetPartitionId() (v types.TPartitionId) {
- return p.PartitionId
+func NewTVisibleVersionReq() *TVisibleVersionReq {
+ return &TVisibleVersionReq{}
}
-func (p *TPartitionVersionInfo) GetVersion() (v types.TVersion) {
- return p.Version
+func (p *TVisibleVersionReq) InitDefault() {
}
-func (p *TPartitionVersionInfo) GetVersionHash() (v types.TVersionHash) {
- return p.VersionHash
-}
-func (p *TPartitionVersionInfo) SetPartitionId(val types.TPartitionId) {
- p.PartitionId = val
-}
-func (p *TPartitionVersionInfo) SetVersion(val types.TVersion) {
- p.Version = val
+func (p *TVisibleVersionReq) GetPartitionVersion() (v map[types.TPartitionId]types.TVersion) {
+ return p.PartitionVersion
}
-func (p *TPartitionVersionInfo) SetVersionHash(val types.TVersionHash) {
- p.VersionHash = val
+func (p *TVisibleVersionReq) SetPartitionVersion(val map[types.TPartitionId]types.TVersion) {
+ p.PartitionVersion = val
}
-var fieldIDToName_TPartitionVersionInfo = map[int16]string{
- 1: "partition_id",
- 2: "version",
- 3: "version_hash",
+var fieldIDToName_TVisibleVersionReq = map[int16]string{
+ 1: "partition_version",
}
-func (p *TPartitionVersionInfo) Read(iprot thrift.TProtocol) (err error) {
+func (p *TVisibleVersionReq) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
- var issetPartitionId bool = false
- var issetVersion bool = false
- var issetVersionHash bool = false
+ var issetPartitionVersion bool = false
if _, err = iprot.ReadStructBegin(); err != nil {
goto ReadStructBeginError
@@ -17475,52 +20531,27 @@ func (p *TPartitionVersionInfo) Read(iprot thrift.TProtocol) (err error) {
for {
_, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
if err != nil {
- goto ReadFieldBeginError
- }
- if fieldTypeId == thrift.STOP {
- break
- }
-
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.I64 {
- if err = p.ReadField1(iprot); err != nil {
- goto ReadFieldError
- }
- issetPartitionId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- case 2:
- if fieldTypeId == thrift.I64 {
- if err = p.ReadField2(iprot); err != nil {
- goto ReadFieldError
- }
- issetVersion = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- case 3:
- if fieldTypeId == thrift.I64 {
- if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.MAP {
+ if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- issetVersionHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ issetPartitionVersion = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -17529,27 +20560,17 @@ func (p *TPartitionVersionInfo) Read(iprot thrift.TProtocol) (err error) {
goto ReadStructEndError
}
- if !issetPartitionId {
+ if !issetPartitionVersion {
fieldId = 1
goto RequiredFieldNotSetError
}
-
- if !issetVersion {
- fieldId = 2
- goto RequiredFieldNotSetError
- }
-
- if !issetVersionHash {
- fieldId = 3
- goto RequiredFieldNotSetError
- }
return nil
ReadStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPartitionVersionInfo[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TVisibleVersionReq[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -17558,39 +20579,42 @@ ReadFieldEndError:
ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
RequiredFieldNotSetError:
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPartitionVersionInfo[fieldId]))
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TVisibleVersionReq[fieldId]))
}
-func (p *TPartitionVersionInfo) ReadField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
+func (p *TVisibleVersionReq) ReadField1(iprot thrift.TProtocol) error {
+ _, _, size, err := iprot.ReadMapBegin()
+ if err != nil {
return err
- } else {
- p.PartitionId = v
}
- return nil
-}
+ _field := make(map[types.TPartitionId]types.TVersion, size)
+ for i := 0; i < size; i++ {
+ var _key types.TPartitionId
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _key = v
+ }
-func (p *TPartitionVersionInfo) ReadField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- p.Version = v
- }
- return nil
-}
+ var _val types.TVersion
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _val = v
+ }
-func (p *TPartitionVersionInfo) ReadField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
+ _field[_key] = _val
+ }
+ if err := iprot.ReadMapEnd(); err != nil {
return err
- } else {
- p.VersionHash = v
}
+ p.PartitionVersion = _field
return nil
}
-func (p *TPartitionVersionInfo) Write(oprot thrift.TProtocol) (err error) {
+func (p *TVisibleVersionReq) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("TPartitionVersionInfo"); err != nil {
+ if err = oprot.WriteStructBegin("TVisibleVersionReq"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -17598,15 +20622,6 @@ func (p *TPartitionVersionInfo) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
- if err = p.writeField2(oprot); err != nil {
- fieldId = 2
- goto WriteFieldError
- }
- if err = p.writeField3(oprot); err != nil {
- fieldId = 3
- goto WriteFieldError
- }
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -17625,45 +20640,22 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *TPartitionVersionInfo) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 1); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(p.PartitionId); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
-}
-
-func (p *TPartitionVersionInfo) writeField2(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("version", thrift.I64, 2); err != nil {
+func (p *TVisibleVersionReq) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("partition_version", thrift.MAP, 1); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteI64(p.Version); err != nil {
+ if err := oprot.WriteMapBegin(thrift.I64, thrift.I64, len(p.PartitionVersion)); err != nil {
return err
}
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
-}
-
-func (p *TPartitionVersionInfo) writeField3(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("version_hash", thrift.I64, 3); err != nil {
- goto WriteFieldBeginError
+ for k, v := range p.PartitionVersion {
+ if err := oprot.WriteI64(k); err != nil {
+ return err
+ }
+ if err := oprot.WriteI64(v); err != nil {
+ return err
+ }
}
- if err := oprot.WriteI64(p.VersionHash); err != nil {
+ if err := oprot.WriteMapEnd(); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -17671,126 +20663,164 @@ func (p *TPartitionVersionInfo) writeField3(oprot thrift.TProtocol) (err error)
}
return nil
WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *TPartitionVersionInfo) String() string {
+func (p *TVisibleVersionReq) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("TPartitionVersionInfo(%+v)", *p)
+ return fmt.Sprintf("TVisibleVersionReq(%+v)", *p)
+
}
-func (p *TPartitionVersionInfo) DeepEqual(ano *TPartitionVersionInfo) bool {
+func (p *TVisibleVersionReq) DeepEqual(ano *TVisibleVersionReq) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.PartitionId) {
- return false
- }
- if !p.Field2DeepEqual(ano.Version) {
- return false
- }
- if !p.Field3DeepEqual(ano.VersionHash) {
+ if !p.Field1DeepEqual(ano.PartitionVersion) {
return false
}
return true
}
-func (p *TPartitionVersionInfo) Field1DeepEqual(src types.TPartitionId) bool {
+func (p *TVisibleVersionReq) Field1DeepEqual(src map[types.TPartitionId]types.TVersion) bool {
- if p.PartitionId != src {
+ if len(p.PartitionVersion) != len(src) {
return false
}
+ for k, v := range p.PartitionVersion {
+ _src := src[k]
+ if v != _src {
+ return false
+ }
+ }
return true
}
-func (p *TPartitionVersionInfo) Field2DeepEqual(src types.TVersion) bool {
- if p.Version != src {
- return false
- }
- return true
+type TCalcDeleteBitmapPartitionInfo struct {
+ PartitionId types.TPartitionId `thrift:"partition_id,1,required" frugal:"1,required,i64" json:"partition_id"`
+ Version types.TVersion `thrift:"version,2,required" frugal:"2,required,i64" json:"version"`
+ TabletIds []types.TTabletId `thrift:"tablet_ids,3,required" frugal:"3,required,list" json:"tablet_ids"`
+ BaseCompactionCnts []int64 `thrift:"base_compaction_cnts,4,optional" frugal:"4,optional,list" json:"base_compaction_cnts,omitempty"`
+ CumulativeCompactionCnts []int64 `thrift:"cumulative_compaction_cnts,5,optional" frugal:"5,optional,list" json:"cumulative_compaction_cnts,omitempty"`
+ CumulativePoints []int64 `thrift:"cumulative_points,6,optional" frugal:"6,optional,list" json:"cumulative_points,omitempty"`
+ SubTxnIds []int64 `thrift:"sub_txn_ids,7,optional" frugal:"7,optional,list" json:"sub_txn_ids,omitempty"`
}
-func (p *TPartitionVersionInfo) Field3DeepEqual(src types.TVersionHash) bool {
- if p.VersionHash != src {
- return false
- }
- return true
+func NewTCalcDeleteBitmapPartitionInfo() *TCalcDeleteBitmapPartitionInfo {
+ return &TCalcDeleteBitmapPartitionInfo{}
}
-type TMoveDirReq struct {
- TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"`
- SchemaHash types.TSchemaHash `thrift:"schema_hash,2,required" frugal:"2,required,i32" json:"schema_hash"`
- Src string `thrift:"src,3,required" frugal:"3,required,string" json:"src"`
- JobId int64 `thrift:"job_id,4,required" frugal:"4,required,i64" json:"job_id"`
- Overwrite bool `thrift:"overwrite,5,required" frugal:"5,required,bool" json:"overwrite"`
+func (p *TCalcDeleteBitmapPartitionInfo) InitDefault() {
}
-func NewTMoveDirReq() *TMoveDirReq {
- return &TMoveDirReq{}
+func (p *TCalcDeleteBitmapPartitionInfo) GetPartitionId() (v types.TPartitionId) {
+ return p.PartitionId
}
-func (p *TMoveDirReq) InitDefault() {
- *p = TMoveDirReq{}
+func (p *TCalcDeleteBitmapPartitionInfo) GetVersion() (v types.TVersion) {
+ return p.Version
}
-func (p *TMoveDirReq) GetTabletId() (v types.TTabletId) {
- return p.TabletId
+func (p *TCalcDeleteBitmapPartitionInfo) GetTabletIds() (v []types.TTabletId) {
+ return p.TabletIds
}
-func (p *TMoveDirReq) GetSchemaHash() (v types.TSchemaHash) {
- return p.SchemaHash
+var TCalcDeleteBitmapPartitionInfo_BaseCompactionCnts_DEFAULT []int64
+
+func (p *TCalcDeleteBitmapPartitionInfo) GetBaseCompactionCnts() (v []int64) {
+ if !p.IsSetBaseCompactionCnts() {
+ return TCalcDeleteBitmapPartitionInfo_BaseCompactionCnts_DEFAULT
+ }
+ return p.BaseCompactionCnts
}
-func (p *TMoveDirReq) GetSrc() (v string) {
- return p.Src
+var TCalcDeleteBitmapPartitionInfo_CumulativeCompactionCnts_DEFAULT []int64
+
+func (p *TCalcDeleteBitmapPartitionInfo) GetCumulativeCompactionCnts() (v []int64) {
+ if !p.IsSetCumulativeCompactionCnts() {
+ return TCalcDeleteBitmapPartitionInfo_CumulativeCompactionCnts_DEFAULT
+ }
+ return p.CumulativeCompactionCnts
}
-func (p *TMoveDirReq) GetJobId() (v int64) {
- return p.JobId
+var TCalcDeleteBitmapPartitionInfo_CumulativePoints_DEFAULT []int64
+
+func (p *TCalcDeleteBitmapPartitionInfo) GetCumulativePoints() (v []int64) {
+ if !p.IsSetCumulativePoints() {
+ return TCalcDeleteBitmapPartitionInfo_CumulativePoints_DEFAULT
+ }
+ return p.CumulativePoints
}
-func (p *TMoveDirReq) GetOverwrite() (v bool) {
- return p.Overwrite
+var TCalcDeleteBitmapPartitionInfo_SubTxnIds_DEFAULT []int64
+
+func (p *TCalcDeleteBitmapPartitionInfo) GetSubTxnIds() (v []int64) {
+ if !p.IsSetSubTxnIds() {
+ return TCalcDeleteBitmapPartitionInfo_SubTxnIds_DEFAULT
+ }
+ return p.SubTxnIds
}
-func (p *TMoveDirReq) SetTabletId(val types.TTabletId) {
- p.TabletId = val
+func (p *TCalcDeleteBitmapPartitionInfo) SetPartitionId(val types.TPartitionId) {
+ p.PartitionId = val
}
-func (p *TMoveDirReq) SetSchemaHash(val types.TSchemaHash) {
- p.SchemaHash = val
+func (p *TCalcDeleteBitmapPartitionInfo) SetVersion(val types.TVersion) {
+ p.Version = val
}
-func (p *TMoveDirReq) SetSrc(val string) {
- p.Src = val
+func (p *TCalcDeleteBitmapPartitionInfo) SetTabletIds(val []types.TTabletId) {
+ p.TabletIds = val
}
-func (p *TMoveDirReq) SetJobId(val int64) {
- p.JobId = val
+func (p *TCalcDeleteBitmapPartitionInfo) SetBaseCompactionCnts(val []int64) {
+ p.BaseCompactionCnts = val
}
-func (p *TMoveDirReq) SetOverwrite(val bool) {
- p.Overwrite = val
+func (p *TCalcDeleteBitmapPartitionInfo) SetCumulativeCompactionCnts(val []int64) {
+ p.CumulativeCompactionCnts = val
+}
+func (p *TCalcDeleteBitmapPartitionInfo) SetCumulativePoints(val []int64) {
+ p.CumulativePoints = val
+}
+func (p *TCalcDeleteBitmapPartitionInfo) SetSubTxnIds(val []int64) {
+ p.SubTxnIds = val
}
-var fieldIDToName_TMoveDirReq = map[int16]string{
- 1: "tablet_id",
- 2: "schema_hash",
- 3: "src",
- 4: "job_id",
- 5: "overwrite",
+var fieldIDToName_TCalcDeleteBitmapPartitionInfo = map[int16]string{
+ 1: "partition_id",
+ 2: "version",
+ 3: "tablet_ids",
+ 4: "base_compaction_cnts",
+ 5: "cumulative_compaction_cnts",
+ 6: "cumulative_points",
+ 7: "sub_txn_ids",
}
-func (p *TMoveDirReq) Read(iprot thrift.TProtocol) (err error) {
+func (p *TCalcDeleteBitmapPartitionInfo) IsSetBaseCompactionCnts() bool {
+ return p.BaseCompactionCnts != nil
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) IsSetCumulativeCompactionCnts() bool {
+ return p.CumulativeCompactionCnts != nil
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) IsSetCumulativePoints() bool {
+ return p.CumulativePoints != nil
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) IsSetSubTxnIds() bool {
+ return p.SubTxnIds != nil
+}
+
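+// Read decodes a TCalcDeleteBitmapPartitionInfo from the protocol, skipping
+// unknown or mistyped fields and failing if any required field
+// (partition_id, version, tablet_ids) is missing.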
+func (p *TCalcDeleteBitmapPartitionInfo) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
- var issetTabletId bool = false
- var issetSchemaHash bool = false
- var issetSrc bool = false
- var issetJobId bool = false
- var issetOverwrite bool = false
+ var issetPartitionId bool = false
+ var issetVersion bool = false
+ var issetTabletIds bool = false
if _, err = iprot.ReadStructBegin(); err != nil {
goto ReadStructBeginError
@@ -17811,92 +20841,85 @@ func (p *TMoveDirReq) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- issetTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ issetPartitionId = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
- if fieldTypeId == thrift.I32 {
+ if fieldTypeId == thrift.I64 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- issetSchemaHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ issetVersion = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
- if fieldTypeId == thrift.STRING {
+ if fieldTypeId == thrift.LIST {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- issetSrc = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ issetTabletIds = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
- if fieldTypeId == thrift.I64 {
+ if fieldTypeId == thrift.LIST {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- issetJobId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
- if fieldTypeId == thrift.BOOL {
+ if fieldTypeId == thrift.LIST {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- issetOverwrite = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 6:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField6(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 7:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField7(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
}
if err = iprot.ReadStructEnd(); err != nil {
- goto ReadStructEndError
- }
-
- if !issetTabletId {
- fieldId = 1
- goto RequiredFieldNotSetError
- }
-
- if !issetSchemaHash {
- fieldId = 2
- goto RequiredFieldNotSetError
+ goto ReadStructEndError
}
- if !issetSrc {
- fieldId = 3
+ if !issetPartitionId {
+ fieldId = 1
goto RequiredFieldNotSetError
}
- if !issetJobId {
- fieldId = 4
+ if !issetVersion {
+ fieldId = 2
goto RequiredFieldNotSetError
}
- if !issetOverwrite {
- fieldId = 5
+ if !issetTabletIds {
+ fieldId = 3
goto RequiredFieldNotSetError
}
return nil
@@ -17905,7 +20928,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TMoveDirReq[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCalcDeleteBitmapPartitionInfo[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -17914,57 +20937,150 @@ ReadFieldEndError:
ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
RequiredFieldNotSetError:
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TMoveDirReq[fieldId]))
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCalcDeleteBitmapPartitionInfo[fieldId]))
}
-func (p *TMoveDirReq) ReadField1(iprot thrift.TProtocol) error {
+func (p *TCalcDeleteBitmapPartitionInfo) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TPartitionId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = v
+ _field = v
}
+ p.PartitionId = _field
return nil
}
+func (p *TCalcDeleteBitmapPartitionInfo) ReadField2(iprot thrift.TProtocol) error {
-func (p *TMoveDirReq) ReadField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(); err != nil {
+ var _field types.TVersion
+ if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.SchemaHash = v
+ _field = v
}
+ p.Version = _field
return nil
}
+func (p *TCalcDeleteBitmapPartitionInfo) ReadField3(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]types.TTabletId, 0, size)
+ for i := 0; i < size; i++ {
-func (p *TMoveDirReq) ReadField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
+ var _elem types.TTabletId
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
return err
- } else {
- p.Src = v
}
+ p.TabletIds = _field
return nil
}
+func (p *TCalcDeleteBitmapPartitionInfo) ReadField4(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]int64, 0, size)
+ for i := 0; i < size; i++ {
-func (p *TMoveDirReq) ReadField4(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
+ var _elem int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
return err
- } else {
- p.JobId = v
}
+ p.BaseCompactionCnts = _field
return nil
}
+func (p *TCalcDeleteBitmapPartitionInfo) ReadField5(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]int64, 0, size)
+ for i := 0; i < size; i++ {
-func (p *TMoveDirReq) ReadField5(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(); err != nil {
+ var _elem int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
return err
- } else {
- p.Overwrite = v
}
+ p.CumulativeCompactionCnts = _field
return nil
}
+func (p *TCalcDeleteBitmapPartitionInfo) ReadField6(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]int64, 0, size)
+ for i := 0; i < size; i++ {
-func (p *TMoveDirReq) Write(oprot thrift.TProtocol) (err error) {
+ var _elem int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.CumulativePoints = _field
+ return nil
+}
+func (p *TCalcDeleteBitmapPartitionInfo) ReadField7(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+
+ var _elem int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.SubTxnIds = _field
+ return nil
+}
+
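+// Write serializes the struct: required fields 1-3 are always emitted, while
+// the optional list fields 4-7 are written only when they are set.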
+func (p *TCalcDeleteBitmapPartitionInfo) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("TMoveDirReq"); err != nil {
+ if err = oprot.WriteStructBegin("TCalcDeleteBitmapPartitionInfo"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -17988,7 +21104,14 @@ func (p *TMoveDirReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 5
goto WriteFieldError
}
-
+ if err = p.writeField6(oprot); err != nil {
+ fieldId = 6
+ goto WriteFieldError
+ }
+ if err = p.writeField7(oprot); err != nil {
+ fieldId = 7
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -18007,11 +21130,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *TMoveDirReq) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("tablet_id", thrift.I64, 1); err != nil {
+func (p *TCalcDeleteBitmapPartitionInfo) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 1); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteI64(p.TabletId); err != nil {
+ if err := oprot.WriteI64(p.PartitionId); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -18024,11 +21147,11 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *TMoveDirReq) writeField2(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("schema_hash", thrift.I32, 2); err != nil {
+func (p *TCalcDeleteBitmapPartitionInfo) writeField2(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("version", thrift.I64, 2); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteI32(p.SchemaHash); err != nil {
+ if err := oprot.WriteI64(p.Version); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -18041,11 +21164,19 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
}
-func (p *TMoveDirReq) writeField3(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("src", thrift.STRING, 3); err != nil {
+func (p *TCalcDeleteBitmapPartitionInfo) writeField3(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("tablet_ids", thrift.LIST, 3); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteString(p.Src); err != nil {
+ if err := oprot.WriteListBegin(thrift.I64, len(p.TabletIds)); err != nil {
+ return err
+ }
+ for _, v := range p.TabletIds {
+ if err := oprot.WriteI64(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -18058,15 +21189,25 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
}
-func (p *TMoveDirReq) writeField4(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("job_id", thrift.I64, 4); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(p.JobId); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
+func (p *TCalcDeleteBitmapPartitionInfo) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetBaseCompactionCnts() {
+ if err = oprot.WriteFieldBegin("base_compaction_cnts", thrift.LIST, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.I64, len(p.BaseCompactionCnts)); err != nil {
+ return err
+ }
+ for _, v := range p.BaseCompactionCnts {
+ if err := oprot.WriteI64(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
}
return nil
WriteFieldBeginError:
@@ -18075,15 +21216,25 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
}
-func (p *TMoveDirReq) writeField5(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("overwrite", thrift.BOOL, 5); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteBool(p.Overwrite); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
+func (p *TCalcDeleteBitmapPartitionInfo) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetCumulativeCompactionCnts() {
+ if err = oprot.WriteFieldBegin("cumulative_compaction_cnts", thrift.LIST, 5); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.I64, len(p.CumulativeCompactionCnts)); err != nil {
+ return err
+ }
+ for _, v := range p.CumulativeCompactionCnts {
+ if err := oprot.WriteI64(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
}
return nil
WriteFieldBeginError:
@@ -18092,135 +21243,215 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err)
}
-func (p *TMoveDirReq) String() string {
+func (p *TCalcDeleteBitmapPartitionInfo) writeField6(oprot thrift.TProtocol) (err error) {
+ if p.IsSetCumulativePoints() {
+ if err = oprot.WriteFieldBegin("cumulative_points", thrift.LIST, 6); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.I64, len(p.CumulativePoints)); err != nil {
+ return err
+ }
+ for _, v := range p.CumulativePoints {
+ if err := oprot.WriteI64(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err)
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) writeField7(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSubTxnIds() {
+ if err = oprot.WriteFieldBegin("sub_txn_ids", thrift.LIST, 7); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.I64, len(p.SubTxnIds)); err != nil {
+ return err
+ }
+ for _, v := range p.SubTxnIds {
+ if err := oprot.WriteI64(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err)
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("TMoveDirReq(%+v)", *p)
+ return fmt.Sprintf("TCalcDeleteBitmapPartitionInfo(%+v)", *p)
+
}
-func (p *TMoveDirReq) DeepEqual(ano *TMoveDirReq) bool {
+func (p *TCalcDeleteBitmapPartitionInfo) DeepEqual(ano *TCalcDeleteBitmapPartitionInfo) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.TabletId) {
+ if !p.Field1DeepEqual(ano.PartitionId) {
return false
}
- if !p.Field2DeepEqual(ano.SchemaHash) {
+ if !p.Field2DeepEqual(ano.Version) {
return false
}
- if !p.Field3DeepEqual(ano.Src) {
+ if !p.Field3DeepEqual(ano.TabletIds) {
return false
}
- if !p.Field4DeepEqual(ano.JobId) {
+ if !p.Field4DeepEqual(ano.BaseCompactionCnts) {
return false
}
- if !p.Field5DeepEqual(ano.Overwrite) {
+ if !p.Field5DeepEqual(ano.CumulativeCompactionCnts) {
+ return false
+ }
+ if !p.Field6DeepEqual(ano.CumulativePoints) {
+ return false
+ }
+ if !p.Field7DeepEqual(ano.SubTxnIds) {
return false
}
return true
}
-func (p *TMoveDirReq) Field1DeepEqual(src types.TTabletId) bool {
+func (p *TCalcDeleteBitmapPartitionInfo) Field1DeepEqual(src types.TPartitionId) bool {
+
+ if p.PartitionId != src {
+ return false
+ }
+ return true
+}
+func (p *TCalcDeleteBitmapPartitionInfo) Field2DeepEqual(src types.TVersion) bool {
+
+ if p.Version != src {
+ return false
+ }
+ return true
+}
+func (p *TCalcDeleteBitmapPartitionInfo) Field3DeepEqual(src []types.TTabletId) bool {
- if p.TabletId != src {
+ if len(p.TabletIds) != len(src) {
return false
}
+ for i, v := range p.TabletIds {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
+ }
return true
}
-func (p *TMoveDirReq) Field2DeepEqual(src types.TSchemaHash) bool {
+func (p *TCalcDeleteBitmapPartitionInfo) Field4DeepEqual(src []int64) bool {
- if p.SchemaHash != src {
+ if len(p.BaseCompactionCnts) != len(src) {
return false
}
+ for i, v := range p.BaseCompactionCnts {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
+ }
return true
}
-func (p *TMoveDirReq) Field3DeepEqual(src string) bool {
+func (p *TCalcDeleteBitmapPartitionInfo) Field5DeepEqual(src []int64) bool {
- if strings.Compare(p.Src, src) != 0 {
+ if len(p.CumulativeCompactionCnts) != len(src) {
return false
}
+ for i, v := range p.CumulativeCompactionCnts {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
+ }
return true
}
-func (p *TMoveDirReq) Field4DeepEqual(src int64) bool {
+func (p *TCalcDeleteBitmapPartitionInfo) Field6DeepEqual(src []int64) bool {
- if p.JobId != src {
+ if len(p.CumulativePoints) != len(src) {
return false
}
+ for i, v := range p.CumulativePoints {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
+ }
return true
}
-func (p *TMoveDirReq) Field5DeepEqual(src bool) bool {
+func (p *TCalcDeleteBitmapPartitionInfo) Field7DeepEqual(src []int64) bool {
- if p.Overwrite != src {
+ if len(p.SubTxnIds) != len(src) {
return false
}
+ for i, v := range p.SubTxnIds {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
+ }
return true
}
-type TPublishVersionRequest struct {
- TransactionId types.TTransactionId `thrift:"transaction_id,1,required" frugal:"1,required,i64" json:"transaction_id"`
- PartitionVersionInfos []*TPartitionVersionInfo `thrift:"partition_version_infos,2,required" frugal:"2,required,list" json:"partition_version_infos"`
- StrictMode bool `thrift:"strict_mode,3,optional" frugal:"3,optional,bool" json:"strict_mode,omitempty"`
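+// TCalcDeleteBitmapRequest pairs a transaction id with the partitions whose
+// delete bitmaps should be calculated; both fields are required.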
+type TCalcDeleteBitmapRequest struct {
+ TransactionId types.TTransactionId `thrift:"transaction_id,1,required" frugal:"1,required,i64" json:"transaction_id"`
+ Partitions []*TCalcDeleteBitmapPartitionInfo `thrift:"partitions,2,required" frugal:"2,required,list" json:"partitions"`
}
-func NewTPublishVersionRequest() *TPublishVersionRequest {
- return &TPublishVersionRequest{
-
- StrictMode: false,
- }
+func NewTCalcDeleteBitmapRequest() *TCalcDeleteBitmapRequest {
+ return &TCalcDeleteBitmapRequest{}
}
-func (p *TPublishVersionRequest) InitDefault() {
- *p = TPublishVersionRequest{
-
- StrictMode: false,
- }
+func (p *TCalcDeleteBitmapRequest) InitDefault() {
}
-func (p *TPublishVersionRequest) GetTransactionId() (v types.TTransactionId) {
+func (p *TCalcDeleteBitmapRequest) GetTransactionId() (v types.TTransactionId) {
return p.TransactionId
}
-func (p *TPublishVersionRequest) GetPartitionVersionInfos() (v []*TPartitionVersionInfo) {
- return p.PartitionVersionInfos
-}
-
-var TPublishVersionRequest_StrictMode_DEFAULT bool = false
-
-func (p *TPublishVersionRequest) GetStrictMode() (v bool) {
- if !p.IsSetStrictMode() {
- return TPublishVersionRequest_StrictMode_DEFAULT
- }
- return p.StrictMode
+func (p *TCalcDeleteBitmapRequest) GetPartitions() (v []*TCalcDeleteBitmapPartitionInfo) {
+ return p.Partitions
}
-func (p *TPublishVersionRequest) SetTransactionId(val types.TTransactionId) {
+func (p *TCalcDeleteBitmapRequest) SetTransactionId(val types.TTransactionId) {
p.TransactionId = val
}
-func (p *TPublishVersionRequest) SetPartitionVersionInfos(val []*TPartitionVersionInfo) {
- p.PartitionVersionInfos = val
-}
-func (p *TPublishVersionRequest) SetStrictMode(val bool) {
- p.StrictMode = val
+func (p *TCalcDeleteBitmapRequest) SetPartitions(val []*TCalcDeleteBitmapPartitionInfo) {
+ p.Partitions = val
}
-var fieldIDToName_TPublishVersionRequest = map[int16]string{
+var fieldIDToName_TCalcDeleteBitmapRequest = map[int16]string{
1: "transaction_id",
- 2: "partition_version_infos",
- 3: "strict_mode",
-}
-
-func (p *TPublishVersionRequest) IsSetStrictMode() bool {
- return p.StrictMode != TPublishVersionRequest_StrictMode_DEFAULT
+ 2: "partitions",
}
-func (p *TPublishVersionRequest) Read(iprot thrift.TProtocol) (err error) {
+func (p *TCalcDeleteBitmapRequest) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
var issetTransactionId bool = false
- var issetPartitionVersionInfos bool = false
+ var issetPartitions bool = false
if _, err = iprot.ReadStructBegin(); err != nil {
goto ReadStructBeginError
@@ -18242,38 +21473,23 @@ func (p *TPublishVersionRequest) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTransactionId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.LIST {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- issetPartitionVersionInfos = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- case 3:
- if fieldTypeId == thrift.BOOL {
- if err = p.ReadField3(iprot); err != nil {
- goto ReadFieldError
- }
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ issetPartitions = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -18287,7 +21503,7 @@ func (p *TPublishVersionRequest) Read(iprot thrift.TProtocol) (err error) {
goto RequiredFieldNotSetError
}
- if !issetPartitionVersionInfos {
+ if !issetPartitions {
fieldId = 2
goto RequiredFieldNotSetError
}
@@ -18297,7 +21513,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishVersionRequest[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCalcDeleteBitmapRequest[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -18306,50 +21522,47 @@ ReadFieldEndError:
ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
RequiredFieldNotSetError:
- return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishVersionRequest[fieldId]))
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCalcDeleteBitmapRequest[fieldId]))
}
-func (p *TPublishVersionRequest) ReadField1(iprot thrift.TProtocol) error {
+func (p *TCalcDeleteBitmapRequest) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTransactionId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TransactionId = v
+ _field = v
}
+ p.TransactionId = _field
return nil
}
-
-func (p *TPublishVersionRequest) ReadField2(iprot thrift.TProtocol) error {
+func (p *TCalcDeleteBitmapRequest) ReadField2(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.PartitionVersionInfos = make([]*TPartitionVersionInfo, 0, size)
+ _field := make([]*TCalcDeleteBitmapPartitionInfo, 0, size)
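+	// Decode the list elements into one pre-allocated backing slice (values)
+	// and append pointers into it, avoiding a separate allocation per element.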
+ values := make([]TCalcDeleteBitmapPartitionInfo, size)
for i := 0; i < size; i++ {
- _elem := NewTPartitionVersionInfo()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.PartitionVersionInfos = append(p.PartitionVersionInfos, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Partitions = _field
return nil
}
-func (p *TPublishVersionRequest) ReadField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(); err != nil {
- return err
- } else {
- p.StrictMode = v
- }
- return nil
-}
-
-func (p *TPublishVersionRequest) Write(oprot thrift.TProtocol) (err error) {
+func (p *TCalcDeleteBitmapRequest) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("TPublishVersionRequest"); err != nil {
+ if err = oprot.WriteStructBegin("TCalcDeleteBitmapRequest"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -18361,11 +21574,6 @@ func (p *TPublishVersionRequest) Write(oprot thrift.TProtocol) (err error) {
fieldId = 2
goto WriteFieldError
}
- if err = p.writeField3(oprot); err != nil {
- fieldId = 3
- goto WriteFieldError
- }
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -18384,7 +21592,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *TPublishVersionRequest) writeField1(oprot thrift.TProtocol) (err error) {
+func (p *TCalcDeleteBitmapRequest) writeField1(oprot thrift.TProtocol) (err error) {
if err = oprot.WriteFieldBegin("transaction_id", thrift.I64, 1); err != nil {
goto WriteFieldBeginError
}
@@ -18401,14 +21609,14 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *TPublishVersionRequest) writeField2(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("partition_version_infos", thrift.LIST, 2); err != nil {
+func (p *TCalcDeleteBitmapRequest) writeField2(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("partitions", thrift.LIST, 2); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.PartitionVersionInfos)); err != nil {
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Partitions)); err != nil {
return err
}
- for _, v := range p.PartitionVersionInfos {
+ for _, v := range p.Partitions {
if err := v.Write(oprot); err != nil {
return err
}
@@ -18426,33 +21634,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
}
-func (p *TPublishVersionRequest) writeField3(oprot thrift.TProtocol) (err error) {
- if p.IsSetStrictMode() {
- if err = oprot.WriteFieldBegin("strict_mode", thrift.BOOL, 3); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteBool(p.StrictMode); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
- }
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
-}
-
-func (p *TPublishVersionRequest) String() string {
+func (p *TCalcDeleteBitmapRequest) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("TPublishVersionRequest(%+v)", *p)
+ return fmt.Sprintf("TCalcDeleteBitmapRequest(%+v)", *p)
+
}
-func (p *TPublishVersionRequest) DeepEqual(ano *TPublishVersionRequest) bool {
+func (p *TCalcDeleteBitmapRequest) DeepEqual(ano *TCalcDeleteBitmapRequest) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -18461,28 +21651,25 @@ func (p *TPublishVersionRequest) DeepEqual(ano *TPublishVersionRequest) bool {
if !p.Field1DeepEqual(ano.TransactionId) {
return false
}
- if !p.Field2DeepEqual(ano.PartitionVersionInfos) {
- return false
- }
- if !p.Field3DeepEqual(ano.StrictMode) {
+ if !p.Field2DeepEqual(ano.Partitions) {
return false
}
return true
}
-func (p *TPublishVersionRequest) Field1DeepEqual(src types.TTransactionId) bool {
+func (p *TCalcDeleteBitmapRequest) Field1DeepEqual(src types.TTransactionId) bool {
if p.TransactionId != src {
return false
}
return true
}
-func (p *TPublishVersionRequest) Field2DeepEqual(src []*TPartitionVersionInfo) bool {
+func (p *TCalcDeleteBitmapRequest) Field2DeepEqual(src []*TCalcDeleteBitmapPartitionInfo) bool {
- if len(p.PartitionVersionInfos) != len(src) {
+ if len(p.Partitions) != len(src) {
return false
}
- for i, v := range p.PartitionVersionInfos {
+ for i, v := range p.Partitions {
_src := src[i]
if !v.DeepEqual(_src) {
return false
@@ -18490,13 +21677,6 @@ func (p *TPublishVersionRequest) Field2DeepEqual(src []*TPartitionVersionInfo) b
}
return true
}
-func (p *TPublishVersionRequest) Field3DeepEqual(src bool) bool {
-
- if p.StrictMode != src {
- return false
- }
- return true
-}
type TClearAlterTaskRequest struct {
TabletId types.TTabletId `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"`
@@ -18508,7 +21688,6 @@ func NewTClearAlterTaskRequest() *TClearAlterTaskRequest {
}
func (p *TClearAlterTaskRequest) InitDefault() {
- *p = TClearAlterTaskRequest{}
}
func (p *TClearAlterTaskRequest) GetTabletId() (v types.TTabletId) {
@@ -18557,10 +21736,8 @@ func (p *TClearAlterTaskRequest) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -18568,17 +21745,14 @@ func (p *TClearAlterTaskRequest) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSchemaHash = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -18615,20 +21789,25 @@ RequiredFieldNotSetError:
}
func (p *TClearAlterTaskRequest) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = v
+ _field = v
}
+ p.TabletId = _field
return nil
}
-
func (p *TClearAlterTaskRequest) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SchemaHash = v
+ _field = v
}
+ p.SchemaHash = _field
return nil
}
@@ -18646,7 +21825,6 @@ func (p *TClearAlterTaskRequest) Write(oprot thrift.TProtocol) (err error) {
fieldId = 2
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -18704,6 +21882,7 @@ func (p *TClearAlterTaskRequest) String() string {
return ""
}
return fmt.Sprintf("TClearAlterTaskRequest(%+v)", *p)
+
}
func (p *TClearAlterTaskRequest) DeepEqual(ano *TClearAlterTaskRequest) bool {
@@ -18746,7 +21925,6 @@ func NewTClearTransactionTaskRequest() *TClearTransactionTaskRequest {
}
func (p *TClearTransactionTaskRequest) InitDefault() {
- *p = TClearTransactionTaskRequest{}
}
func (p *TClearTransactionTaskRequest) GetTransactionId() (v types.TTransactionId) {
@@ -18795,10 +21973,8 @@ func (p *TClearTransactionTaskRequest) Read(iprot thrift.TProtocol) (err error)
goto ReadFieldError
}
issetTransactionId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.LIST {
@@ -18806,17 +21982,14 @@ func (p *TClearTransactionTaskRequest) Read(iprot thrift.TProtocol) (err error)
goto ReadFieldError
}
issetPartitionId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -18853,21 +22026,24 @@ RequiredFieldNotSetError:
}
func (p *TClearTransactionTaskRequest) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTransactionId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TransactionId = v
+ _field = v
}
+ p.TransactionId = _field
return nil
}
-
func (p *TClearTransactionTaskRequest) ReadField2(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.PartitionId = make([]types.TPartitionId, 0, size)
+ _field := make([]types.TPartitionId, 0, size)
for i := 0; i < size; i++ {
+
var _elem types.TPartitionId
if v, err := iprot.ReadI64(); err != nil {
return err
@@ -18875,11 +22051,12 @@ func (p *TClearTransactionTaskRequest) ReadField2(iprot thrift.TProtocol) error
_elem = v
}
- p.PartitionId = append(p.PartitionId, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.PartitionId = _field
return nil
}
@@ -18897,7 +22074,6 @@ func (p *TClearTransactionTaskRequest) Write(oprot thrift.TProtocol) (err error)
fieldId = 2
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -18963,6 +22139,7 @@ func (p *TClearTransactionTaskRequest) String() string {
return ""
}
return fmt.Sprintf("TClearTransactionTaskRequest(%+v)", *p)
+
}
func (p *TClearTransactionTaskRequest) DeepEqual(ano *TClearTransactionTaskRequest) bool {
@@ -19013,7 +22190,6 @@ func NewTRecoverTabletReq() *TRecoverTabletReq {
}
func (p *TRecoverTabletReq) InitDefault() {
- *p = TRecoverTabletReq{}
}
var TRecoverTabletReq_TabletId_DEFAULT types.TTabletId
@@ -19111,47 +22287,38 @@ func (p *TRecoverTabletReq) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I64 {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -19177,38 +22344,47 @@ ReadStructEndError:
}
func (p *TRecoverTabletReq) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = &v
+ _field = &v
}
+ p.TabletId = _field
return nil
}
-
func (p *TRecoverTabletReq) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SchemaHash = &v
+ _field = &v
}
+ p.SchemaHash = _field
return nil
}
-
func (p *TRecoverTabletReq) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersion
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.Version = &v
+ _field = &v
}
+ p.Version = _field
return nil
}
-
func (p *TRecoverTabletReq) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *types.TVersionHash
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.VersionHash = &v
+ _field = &v
}
+ p.VersionHash = _field
return nil
}
@@ -19234,7 +22410,6 @@ func (p *TRecoverTabletReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -19334,6 +22509,7 @@ func (p *TRecoverTabletReq) String() string {
return ""
}
return fmt.Sprintf("TRecoverTabletReq(%+v)", *p)
+
}
func (p *TRecoverTabletReq) DeepEqual(ano *TRecoverTabletReq) bool {
@@ -19407,19 +22583,22 @@ func (p *TRecoverTabletReq) Field4DeepEqual(src *types.TVersionHash) bool {
}
type TTabletMetaInfo struct {
- TabletId *types.TTabletId `thrift:"tablet_id,1,optional" frugal:"1,optional,i64" json:"tablet_id,omitempty"`
- SchemaHash *types.TSchemaHash `thrift:"schema_hash,2,optional" frugal:"2,optional,i32" json:"schema_hash,omitempty"`
- PartitionId *types.TPartitionId `thrift:"partition_id,3,optional" frugal:"3,optional,i64" json:"partition_id,omitempty"`
- IsInMemory *bool `thrift:"is_in_memory,5,optional" frugal:"5,optional,bool" json:"is_in_memory,omitempty"`
- StoragePolicyId *int64 `thrift:"storage_policy_id,7,optional" frugal:"7,optional,i64" json:"storage_policy_id,omitempty"`
- ReplicaId *types.TReplicaId `thrift:"replica_id,8,optional" frugal:"8,optional,i64" json:"replica_id,omitempty"`
- BinlogConfig *TBinlogConfig `thrift:"binlog_config,9,optional" frugal:"9,optional,TBinlogConfig" json:"binlog_config,omitempty"`
- CompactionPolicy *string `thrift:"compaction_policy,10,optional" frugal:"10,optional,string" json:"compaction_policy,omitempty"`
- TimeSeriesCompactionGoalSizeMbytes *int64 `thrift:"time_series_compaction_goal_size_mbytes,11,optional" frugal:"11,optional,i64" json:"time_series_compaction_goal_size_mbytes,omitempty"`
- TimeSeriesCompactionFileCountThreshold *int64 `thrift:"time_series_compaction_file_count_threshold,12,optional" frugal:"12,optional,i64" json:"time_series_compaction_file_count_threshold,omitempty"`
- TimeSeriesCompactionTimeThresholdSeconds *int64 `thrift:"time_series_compaction_time_threshold_seconds,13,optional" frugal:"13,optional,i64" json:"time_series_compaction_time_threshold_seconds,omitempty"`
- EnableSingleReplicaCompaction *bool `thrift:"enable_single_replica_compaction,14,optional" frugal:"14,optional,bool" json:"enable_single_replica_compaction,omitempty"`
- SkipWriteIndexOnLoad *bool `thrift:"skip_write_index_on_load,15,optional" frugal:"15,optional,bool" json:"skip_write_index_on_load,omitempty"`
+ TabletId *types.TTabletId `thrift:"tablet_id,1,optional" frugal:"1,optional,i64" json:"tablet_id,omitempty"`
+ SchemaHash *types.TSchemaHash `thrift:"schema_hash,2,optional" frugal:"2,optional,i32" json:"schema_hash,omitempty"`
+ PartitionId *types.TPartitionId `thrift:"partition_id,3,optional" frugal:"3,optional,i64" json:"partition_id,omitempty"`
+ IsInMemory *bool `thrift:"is_in_memory,5,optional" frugal:"5,optional,bool" json:"is_in_memory,omitempty"`
+ StoragePolicyId *int64 `thrift:"storage_policy_id,7,optional" frugal:"7,optional,i64" json:"storage_policy_id,omitempty"`
+ ReplicaId *types.TReplicaId `thrift:"replica_id,8,optional" frugal:"8,optional,i64" json:"replica_id,omitempty"`
+ BinlogConfig *TBinlogConfig `thrift:"binlog_config,9,optional" frugal:"9,optional,TBinlogConfig" json:"binlog_config,omitempty"`
+ CompactionPolicy *string `thrift:"compaction_policy,10,optional" frugal:"10,optional,string" json:"compaction_policy,omitempty"`
+ TimeSeriesCompactionGoalSizeMbytes *int64 `thrift:"time_series_compaction_goal_size_mbytes,11,optional" frugal:"11,optional,i64" json:"time_series_compaction_goal_size_mbytes,omitempty"`
+ TimeSeriesCompactionFileCountThreshold *int64 `thrift:"time_series_compaction_file_count_threshold,12,optional" frugal:"12,optional,i64" json:"time_series_compaction_file_count_threshold,omitempty"`
+ TimeSeriesCompactionTimeThresholdSeconds *int64 `thrift:"time_series_compaction_time_threshold_seconds,13,optional" frugal:"13,optional,i64" json:"time_series_compaction_time_threshold_seconds,omitempty"`
+ EnableSingleReplicaCompaction *bool `thrift:"enable_single_replica_compaction,14,optional" frugal:"14,optional,bool" json:"enable_single_replica_compaction,omitempty"`
+ SkipWriteIndexOnLoad *bool `thrift:"skip_write_index_on_load,15,optional" frugal:"15,optional,bool" json:"skip_write_index_on_load,omitempty"`
+ DisableAutoCompaction *bool `thrift:"disable_auto_compaction,16,optional" frugal:"16,optional,bool" json:"disable_auto_compaction,omitempty"`
+ TimeSeriesCompactionEmptyRowsetsThreshold *int64 `thrift:"time_series_compaction_empty_rowsets_threshold,17,optional" frugal:"17,optional,i64" json:"time_series_compaction_empty_rowsets_threshold,omitempty"`
+ TimeSeriesCompactionLevelThreshold *int64 `thrift:"time_series_compaction_level_threshold,18,optional" frugal:"18,optional,i64" json:"time_series_compaction_level_threshold,omitempty"`
}
func NewTTabletMetaInfo() *TTabletMetaInfo {
@@ -19427,7 +22606,6 @@ func NewTTabletMetaInfo() *TTabletMetaInfo {
}
func (p *TTabletMetaInfo) InitDefault() {
- *p = TTabletMetaInfo{}
}
var TTabletMetaInfo_TabletId_DEFAULT types.TTabletId
@@ -19546,6 +22724,33 @@ func (p *TTabletMetaInfo) GetSkipWriteIndexOnLoad() (v bool) {
}
return *p.SkipWriteIndexOnLoad
}
+
+var TTabletMetaInfo_DisableAutoCompaction_DEFAULT bool
+
+func (p *TTabletMetaInfo) GetDisableAutoCompaction() (v bool) {
+ if !p.IsSetDisableAutoCompaction() {
+ return TTabletMetaInfo_DisableAutoCompaction_DEFAULT
+ }
+ return *p.DisableAutoCompaction
+}
+
+var TTabletMetaInfo_TimeSeriesCompactionEmptyRowsetsThreshold_DEFAULT int64
+
+func (p *TTabletMetaInfo) GetTimeSeriesCompactionEmptyRowsetsThreshold() (v int64) {
+ if !p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() {
+ return TTabletMetaInfo_TimeSeriesCompactionEmptyRowsetsThreshold_DEFAULT
+ }
+ return *p.TimeSeriesCompactionEmptyRowsetsThreshold
+}
+
+var TTabletMetaInfo_TimeSeriesCompactionLevelThreshold_DEFAULT int64
+
+func (p *TTabletMetaInfo) GetTimeSeriesCompactionLevelThreshold() (v int64) {
+ if !p.IsSetTimeSeriesCompactionLevelThreshold() {
+ return TTabletMetaInfo_TimeSeriesCompactionLevelThreshold_DEFAULT
+ }
+ return *p.TimeSeriesCompactionLevelThreshold
+}
func (p *TTabletMetaInfo) SetTabletId(val *types.TTabletId) {
p.TabletId = val
}
@@ -19585,6 +22790,15 @@ func (p *TTabletMetaInfo) SetEnableSingleReplicaCompaction(val *bool) {
func (p *TTabletMetaInfo) SetSkipWriteIndexOnLoad(val *bool) {
p.SkipWriteIndexOnLoad = val
}
+func (p *TTabletMetaInfo) SetDisableAutoCompaction(val *bool) {
+ p.DisableAutoCompaction = val
+}
+func (p *TTabletMetaInfo) SetTimeSeriesCompactionEmptyRowsetsThreshold(val *int64) {
+ p.TimeSeriesCompactionEmptyRowsetsThreshold = val
+}
+func (p *TTabletMetaInfo) SetTimeSeriesCompactionLevelThreshold(val *int64) {
+ p.TimeSeriesCompactionLevelThreshold = val
+}
var fieldIDToName_TTabletMetaInfo = map[int16]string{
1: "tablet_id",
@@ -19600,6 +22814,9 @@ var fieldIDToName_TTabletMetaInfo = map[int16]string{
13: "time_series_compaction_time_threshold_seconds",
14: "enable_single_replica_compaction",
15: "skip_write_index_on_load",
+ 16: "disable_auto_compaction",
+ 17: "time_series_compaction_empty_rowsets_threshold",
+ 18: "time_series_compaction_level_threshold",
}
func (p *TTabletMetaInfo) IsSetTabletId() bool {
@@ -19654,6 +22871,18 @@ func (p *TTabletMetaInfo) IsSetSkipWriteIndexOnLoad() bool {
return p.SkipWriteIndexOnLoad != nil
}
+func (p *TTabletMetaInfo) IsSetDisableAutoCompaction() bool {
+ return p.DisableAutoCompaction != nil
+}
+
+func (p *TTabletMetaInfo) IsSetTimeSeriesCompactionEmptyRowsetsThreshold() bool {
+ return p.TimeSeriesCompactionEmptyRowsetsThreshold != nil
+}
+
+func (p *TTabletMetaInfo) IsSetTimeSeriesCompactionLevelThreshold() bool {
+ return p.TimeSeriesCompactionLevelThreshold != nil
+}
+
func (p *TTabletMetaInfo) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -19678,137 +22907,134 @@ func (p *TTabletMetaInfo) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.I64 {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.I64 {
if err = p.ReadField8(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField9(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.STRING {
if err = p.ReadField10(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 11:
if fieldTypeId == thrift.I64 {
if err = p.ReadField11(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 12:
if fieldTypeId == thrift.I64 {
if err = p.ReadField12(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 13:
if fieldTypeId == thrift.I64 {
if err = p.ReadField13(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 14:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField14(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 15:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField15(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 16:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField16(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 17:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField17(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
- case 15:
- if fieldTypeId == thrift.BOOL {
- if err = p.ReadField15(iprot); err != nil {
+ case 18:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField18(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -19834,118 +23060,176 @@ ReadStructEndError:
}
func (p *TTabletMetaInfo) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = &v
+ _field = &v
}
+ p.TabletId = _field
return nil
}
-
func (p *TTabletMetaInfo) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *types.TSchemaHash
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SchemaHash = &v
+ _field = &v
}
+ p.SchemaHash = _field
return nil
}
-
func (p *TTabletMetaInfo) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *types.TPartitionId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.PartitionId = &v
+ _field = &v
}
+ p.PartitionId = _field
return nil
}
-
func (p *TTabletMetaInfo) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.IsInMemory = &v
+ _field = &v
}
+ p.IsInMemory = _field
return nil
}
-
func (p *TTabletMetaInfo) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.StoragePolicyId = &v
+ _field = &v
}
+ p.StoragePolicyId = _field
return nil
}
-
func (p *TTabletMetaInfo) ReadField8(iprot thrift.TProtocol) error {
+
+ var _field *types.TReplicaId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.ReplicaId = &v
+ _field = &v
}
+ p.ReplicaId = _field
return nil
}
-
func (p *TTabletMetaInfo) ReadField9(iprot thrift.TProtocol) error {
- p.BinlogConfig = NewTBinlogConfig()
- if err := p.BinlogConfig.Read(iprot); err != nil {
+ _field := NewTBinlogConfig()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.BinlogConfig = _field
return nil
}
-
func (p *TTabletMetaInfo) ReadField10(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.CompactionPolicy = &v
+ _field = &v
}
+ p.CompactionPolicy = _field
return nil
}
-
func (p *TTabletMetaInfo) ReadField11(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TimeSeriesCompactionGoalSizeMbytes = &v
+ _field = &v
}
+ p.TimeSeriesCompactionGoalSizeMbytes = _field
return nil
}
-
func (p *TTabletMetaInfo) ReadField12(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TimeSeriesCompactionFileCountThreshold = &v
+ _field = &v
}
+ p.TimeSeriesCompactionFileCountThreshold = _field
return nil
}
-
func (p *TTabletMetaInfo) ReadField13(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TimeSeriesCompactionTimeThresholdSeconds = &v
+ _field = &v
}
+ p.TimeSeriesCompactionTimeThresholdSeconds = _field
return nil
}
-
func (p *TTabletMetaInfo) ReadField14(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.EnableSingleReplicaCompaction = &v
+ _field = &v
}
+ p.EnableSingleReplicaCompaction = _field
return nil
}
-
func (p *TTabletMetaInfo) ReadField15(iprot thrift.TProtocol) error {
+
+ var _field *bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.SkipWriteIndexOnLoad = _field
+ return nil
+}
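+// ReadField16 through ReadField18 decode the newly added optional fields:
+// disable_auto_compaction, time_series_compaction_empty_rowsets_threshold and
+// time_series_compaction_level_threshold.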
+func (p *TTabletMetaInfo) ReadField16(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.SkipWriteIndexOnLoad = &v
+ _field = &v
+ }
+ p.DisableAutoCompaction = _field
+ return nil
+}
+func (p *TTabletMetaInfo) ReadField17(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.TimeSeriesCompactionEmptyRowsetsThreshold = _field
+ return nil
+}
+func (p *TTabletMetaInfo) ReadField18(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
}
+ p.TimeSeriesCompactionLevelThreshold = _field
return nil
}
@@ -20007,7 +23291,18 @@ func (p *TTabletMetaInfo) Write(oprot thrift.TProtocol) (err error) {
fieldId = 15
goto WriteFieldError
}
-
+ if err = p.writeField16(oprot); err != nil {
+ fieldId = 16
+ goto WriteFieldError
+ }
+ if err = p.writeField17(oprot); err != nil {
+ fieldId = 17
+ goto WriteFieldError
+ }
+ if err = p.writeField18(oprot); err != nil {
+ fieldId = 18
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -20273,11 +23568,69 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err)
}
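+// writeField16 emits disable_auto_compaction (field 16, BOOL) only when the
+// optional pointer is set; unset optional fields are omitted from the wire.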
+func (p *TTabletMetaInfo) writeField16(oprot thrift.TProtocol) (err error) {
+ if p.IsSetDisableAutoCompaction() {
+ if err = oprot.WriteFieldBegin("disable_auto_compaction", thrift.BOOL, 16); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(*p.DisableAutoCompaction); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err)
+}
+
+func (p *TTabletMetaInfo) writeField17(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() {
+ if err = oprot.WriteFieldBegin("time_series_compaction_empty_rowsets_threshold", thrift.I64, 17); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.TimeSeriesCompactionEmptyRowsetsThreshold); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err)
+}
+
+func (p *TTabletMetaInfo) writeField18(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTimeSeriesCompactionLevelThreshold() {
+ if err = oprot.WriteFieldBegin("time_series_compaction_level_threshold", thrift.I64, 18); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.TimeSeriesCompactionLevelThreshold); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err)
+}
+
func (p *TTabletMetaInfo) String() string {
if p == nil {
return ""
}
return fmt.Sprintf("TTabletMetaInfo(%+v)", *p)
+
}
func (p *TTabletMetaInfo) DeepEqual(ano *TTabletMetaInfo) bool {
@@ -20325,6 +23678,15 @@ func (p *TTabletMetaInfo) DeepEqual(ano *TTabletMetaInfo) bool {
if !p.Field15DeepEqual(ano.SkipWriteIndexOnLoad) {
return false
}
+ if !p.Field16DeepEqual(ano.DisableAutoCompaction) {
+ return false
+ }
+ if !p.Field17DeepEqual(ano.TimeSeriesCompactionEmptyRowsetsThreshold) {
+ return false
+ }
+ if !p.Field18DeepEqual(ano.TimeSeriesCompactionLevelThreshold) {
+ return false
+ }
return true
}
@@ -20479,6 +23841,42 @@ func (p *TTabletMetaInfo) Field15DeepEqual(src *bool) bool {
}
return true
}
+func (p *TTabletMetaInfo) Field16DeepEqual(src *bool) bool {
+
+ if p.DisableAutoCompaction == src {
+ return true
+ } else if p.DisableAutoCompaction == nil || src == nil {
+ return false
+ }
+ if *p.DisableAutoCompaction != *src {
+ return false
+ }
+ return true
+}
+func (p *TTabletMetaInfo) Field17DeepEqual(src *int64) bool {
+
+ if p.TimeSeriesCompactionEmptyRowsetsThreshold == src {
+ return true
+ } else if p.TimeSeriesCompactionEmptyRowsetsThreshold == nil || src == nil {
+ return false
+ }
+ if *p.TimeSeriesCompactionEmptyRowsetsThreshold != *src {
+ return false
+ }
+ return true
+}
+func (p *TTabletMetaInfo) Field18DeepEqual(src *int64) bool {
+
+ if p.TimeSeriesCompactionLevelThreshold == src {
+ return true
+ } else if p.TimeSeriesCompactionLevelThreshold == nil || src == nil {
+ return false
+ }
+ if *p.TimeSeriesCompactionLevelThreshold != *src {
+ return false
+ }
+ return true
+}
type TUpdateTabletMetaInfoReq struct {
TabletMetaInfos []*TTabletMetaInfo `thrift:"tabletMetaInfos,1,optional" frugal:"1,optional,list" json:"tabletMetaInfos,omitempty"`
@@ -20489,7 +23887,6 @@ func NewTUpdateTabletMetaInfoReq() *TUpdateTabletMetaInfoReq {
}
func (p *TUpdateTabletMetaInfoReq) InitDefault() {
- *p = TUpdateTabletMetaInfoReq{}
}
var TUpdateTabletMetaInfoReq_TabletMetaInfos_DEFAULT []*TTabletMetaInfo
@@ -20536,17 +23933,14 @@ func (p *TUpdateTabletMetaInfoReq) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -20576,18 +23970,22 @@ func (p *TUpdateTabletMetaInfoReq) ReadField1(iprot thrift.TProtocol) error {
if err != nil {
return err
}
- p.TabletMetaInfos = make([]*TTabletMetaInfo, 0, size)
+ _field := make([]*TTabletMetaInfo, 0, size)
+ values := make([]TTabletMetaInfo, size)
for i := 0; i < size; i++ {
- _elem := NewTTabletMetaInfo()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.TabletMetaInfos = append(p.TabletMetaInfos, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.TabletMetaInfos = _field
return nil
}
@@ -20601,7 +23999,6 @@ func (p *TUpdateTabletMetaInfoReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -20652,6 +24049,7 @@ func (p *TUpdateTabletMetaInfoReq) String() string {
return ""
}
return fmt.Sprintf("TUpdateTabletMetaInfoReq(%+v)", *p)
+
}
func (p *TUpdateTabletMetaInfoReq) DeepEqual(ano *TUpdateTabletMetaInfoReq) bool {
@@ -20692,7 +24090,6 @@ func NewTPluginMetaInfo() *TPluginMetaInfo {
}
func (p *TPluginMetaInfo) InitDefault() {
- *p = TPluginMetaInfo{}
}
func (p *TPluginMetaInfo) GetName() (v string) {
@@ -20775,10 +24172,8 @@ func (p *TPluginMetaInfo) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetName = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -20786,37 +24181,30 @@ func (p *TPluginMetaInfo) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetType = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRING {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.STRING {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -20853,38 +24241,47 @@ RequiredFieldNotSetError:
}
func (p *TPluginMetaInfo) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Name = v
+ _field = v
}
+ p.Name = _field
return nil
}
-
func (p *TPluginMetaInfo) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.Type = v
+ _field = v
}
+ p.Type = _field
return nil
}
-
func (p *TPluginMetaInfo) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.SoName = &v
+ _field = &v
}
+ p.SoName = _field
return nil
}
-
func (p *TPluginMetaInfo) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Source = &v
+ _field = &v
}
+ p.Source = _field
return nil
}
@@ -20910,7 +24307,6 @@ func (p *TPluginMetaInfo) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -21006,6 +24402,7 @@ func (p *TPluginMetaInfo) String() string {
return ""
}
return fmt.Sprintf("TPluginMetaInfo(%+v)", *p)
+
}
func (p *TPluginMetaInfo) DeepEqual(ano *TPluginMetaInfo) bool {
@@ -21079,7 +24476,6 @@ func NewTCooldownConf() *TCooldownConf {
}
func (p *TCooldownConf) InitDefault() {
- *p = TCooldownConf{}
}
func (p *TCooldownConf) GetTabletId() (v types.TTabletId) {
@@ -21153,37 +24549,30 @@ func (p *TCooldownConf) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I64 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -21215,29 +24604,36 @@ RequiredFieldNotSetError:
}
func (p *TCooldownConf) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TTabletId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = v
+ _field = v
}
+ p.TabletId = _field
return nil
}
-
func (p *TCooldownConf) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *types.TReplicaId
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.CooldownReplicaId = &v
+ _field = &v
}
+ p.CooldownReplicaId = _field
return nil
}
-
func (p *TCooldownConf) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.CooldownTerm = &v
+ _field = &v
}
+ p.CooldownTerm = _field
return nil
}
@@ -21259,7 +24655,6 @@ func (p *TCooldownConf) Write(oprot thrift.TProtocol) (err error) {
fieldId = 3
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -21338,6 +24733,7 @@ func (p *TCooldownConf) String() string {
return ""
}
return fmt.Sprintf("TCooldownConf(%+v)", *p)
+
}
func (p *TCooldownConf) DeepEqual(ano *TCooldownConf) bool {
@@ -21399,7 +24795,6 @@ func NewTPushCooldownConfReq() *TPushCooldownConfReq {
}
func (p *TPushCooldownConfReq) InitDefault() {
- *p = TPushCooldownConfReq{}
}
func (p *TPushCooldownConfReq) GetCooldownConfs() (v []*TCooldownConf) {
@@ -21439,17 +24834,14 @@ func (p *TPushCooldownConfReq) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetCooldownConfs = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -21485,18 +24877,22 @@ func (p *TPushCooldownConfReq) ReadField1(iprot thrift.TProtocol) error {
if err != nil {
return err
}
- p.CooldownConfs = make([]*TCooldownConf, 0, size)
+ _field := make([]*TCooldownConf, 0, size)
+ values := make([]TCooldownConf, size)
for i := 0; i < size; i++ {
- _elem := NewTCooldownConf()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.CooldownConfs = append(p.CooldownConfs, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.CooldownConfs = _field
return nil
}
@@ -21510,7 +24906,6 @@ func (p *TPushCooldownConfReq) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -21559,6 +24954,7 @@ func (p *TPushCooldownConfReq) String() string {
return ""
}
return fmt.Sprintf("TPushCooldownConfReq(%+v)", *p)
+
}
func (p *TPushCooldownConfReq) DeepEqual(ano *TPushCooldownConfReq) bool {
@@ -21620,6 +25016,10 @@ type TAgentTaskRequest struct {
PushStoragePolicyReq *TPushStoragePolicyReq `thrift:"push_storage_policy_req,31,optional" frugal:"31,optional,TPushStoragePolicyReq" json:"push_storage_policy_req,omitempty"`
AlterInvertedIndexReq *TAlterInvertedIndexReq `thrift:"alter_inverted_index_req,32,optional" frugal:"32,optional,TAlterInvertedIndexReq" json:"alter_inverted_index_req,omitempty"`
GcBinlogReq *TGcBinlogReq `thrift:"gc_binlog_req,33,optional" frugal:"33,optional,TGcBinlogReq" json:"gc_binlog_req,omitempty"`
+ CleanTrashReq *TCleanTrashReq `thrift:"clean_trash_req,34,optional" frugal:"34,optional,TCleanTrashReq" json:"clean_trash_req,omitempty"`
+ VisibleVersionReq *TVisibleVersionReq `thrift:"visible_version_req,35,optional" frugal:"35,optional,TVisibleVersionReq" json:"visible_version_req,omitempty"`
+ CleanUdfCacheReq *TCleanUDFCacheReq `thrift:"clean_udf_cache_req,36,optional" frugal:"36,optional,TCleanUDFCacheReq" json:"clean_udf_cache_req,omitempty"`
+ CalcDeleteBitmapReq *TCalcDeleteBitmapRequest `thrift:"calc_delete_bitmap_req,1000,optional" frugal:"1000,optional,TCalcDeleteBitmapRequest" json:"calc_delete_bitmap_req,omitempty"`
}
func NewTAgentTaskRequest() *TAgentTaskRequest {
@@ -21627,7 +25027,6 @@ func NewTAgentTaskRequest() *TAgentTaskRequest {
}
func (p *TAgentTaskRequest) InitDefault() {
- *p = TAgentTaskRequest{}
}
func (p *TAgentTaskRequest) GetProtocolVersion() (v TAgentServiceVersion) {
@@ -21902,6 +25301,42 @@ func (p *TAgentTaskRequest) GetGcBinlogReq() (v *TGcBinlogReq) {
}
return p.GcBinlogReq
}
+
+var TAgentTaskRequest_CleanTrashReq_DEFAULT *TCleanTrashReq
+
+func (p *TAgentTaskRequest) GetCleanTrashReq() (v *TCleanTrashReq) {
+ if !p.IsSetCleanTrashReq() {
+ return TAgentTaskRequest_CleanTrashReq_DEFAULT
+ }
+ return p.CleanTrashReq
+}
+
+var TAgentTaskRequest_VisibleVersionReq_DEFAULT *TVisibleVersionReq
+
+func (p *TAgentTaskRequest) GetVisibleVersionReq() (v *TVisibleVersionReq) {
+ if !p.IsSetVisibleVersionReq() {
+ return TAgentTaskRequest_VisibleVersionReq_DEFAULT
+ }
+ return p.VisibleVersionReq
+}
+
+var TAgentTaskRequest_CleanUdfCacheReq_DEFAULT *TCleanUDFCacheReq
+
+func (p *TAgentTaskRequest) GetCleanUdfCacheReq() (v *TCleanUDFCacheReq) {
+ if !p.IsSetCleanUdfCacheReq() {
+ return TAgentTaskRequest_CleanUdfCacheReq_DEFAULT
+ }
+ return p.CleanUdfCacheReq
+}
+
+var TAgentTaskRequest_CalcDeleteBitmapReq_DEFAULT *TCalcDeleteBitmapRequest
+
+func (p *TAgentTaskRequest) GetCalcDeleteBitmapReq() (v *TCalcDeleteBitmapRequest) {
+ if !p.IsSetCalcDeleteBitmapReq() {
+ return TAgentTaskRequest_CalcDeleteBitmapReq_DEFAULT
+ }
+ return p.CalcDeleteBitmapReq
+}
func (p *TAgentTaskRequest) SetProtocolVersion(val TAgentServiceVersion) {
p.ProtocolVersion = val
}
@@ -21998,40 +25433,56 @@ func (p *TAgentTaskRequest) SetAlterInvertedIndexReq(val *TAlterInvertedIndexReq
func (p *TAgentTaskRequest) SetGcBinlogReq(val *TGcBinlogReq) {
p.GcBinlogReq = val
}
+func (p *TAgentTaskRequest) SetCleanTrashReq(val *TCleanTrashReq) {
+ p.CleanTrashReq = val
+}
+func (p *TAgentTaskRequest) SetVisibleVersionReq(val *TVisibleVersionReq) {
+ p.VisibleVersionReq = val
+}
+func (p *TAgentTaskRequest) SetCleanUdfCacheReq(val *TCleanUDFCacheReq) {
+ p.CleanUdfCacheReq = val
+}
+func (p *TAgentTaskRequest) SetCalcDeleteBitmapReq(val *TCalcDeleteBitmapRequest) {
+ p.CalcDeleteBitmapReq = val
+}
var fieldIDToName_TAgentTaskRequest = map[int16]string{
- 1: "protocol_version",
- 2: "task_type",
- 3: "signature",
- 4: "priority",
- 5: "create_tablet_req",
- 6: "drop_tablet_req",
- 7: "alter_tablet_req",
- 8: "clone_req",
- 9: "push_req",
- 10: "cancel_delete_data_req",
- 11: "resource_info",
- 12: "storage_medium_migrate_req",
- 13: "check_consistency_req",
- 14: "upload_req",
- 15: "download_req",
- 16: "snapshot_req",
- 17: "release_snapshot_req",
- 18: "clear_remote_file_req",
- 19: "publish_version_req",
- 20: "clear_alter_task_req",
- 21: "clear_transaction_task_req",
- 22: "move_dir_req",
- 23: "recover_tablet_req",
- 24: "alter_tablet_req_v2",
- 25: "recv_time",
- 26: "update_tablet_meta_info_req",
- 27: "compaction_req",
- 28: "storage_migration_req_v2",
- 30: "push_cooldown_conf",
- 31: "push_storage_policy_req",
- 32: "alter_inverted_index_req",
- 33: "gc_binlog_req",
+ 1: "protocol_version",
+ 2: "task_type",
+ 3: "signature",
+ 4: "priority",
+ 5: "create_tablet_req",
+ 6: "drop_tablet_req",
+ 7: "alter_tablet_req",
+ 8: "clone_req",
+ 9: "push_req",
+ 10: "cancel_delete_data_req",
+ 11: "resource_info",
+ 12: "storage_medium_migrate_req",
+ 13: "check_consistency_req",
+ 14: "upload_req",
+ 15: "download_req",
+ 16: "snapshot_req",
+ 17: "release_snapshot_req",
+ 18: "clear_remote_file_req",
+ 19: "publish_version_req",
+ 20: "clear_alter_task_req",
+ 21: "clear_transaction_task_req",
+ 22: "move_dir_req",
+ 23: "recover_tablet_req",
+ 24: "alter_tablet_req_v2",
+ 25: "recv_time",
+ 26: "update_tablet_meta_info_req",
+ 27: "compaction_req",
+ 28: "storage_migration_req_v2",
+ 30: "push_cooldown_conf",
+ 31: "push_storage_policy_req",
+ 32: "alter_inverted_index_req",
+ 33: "gc_binlog_req",
+ 34: "clean_trash_req",
+ 35: "visible_version_req",
+ 36: "clean_udf_cache_req",
+ 1000: "calc_delete_bitmap_req",
}
func (p *TAgentTaskRequest) IsSetPriority() bool {
@@ -22150,6 +25601,22 @@ func (p *TAgentTaskRequest) IsSetGcBinlogReq() bool {
return p.GcBinlogReq != nil
}
+func (p *TAgentTaskRequest) IsSetCleanTrashReq() bool {
+ return p.CleanTrashReq != nil
+}
+
+func (p *TAgentTaskRequest) IsSetVisibleVersionReq() bool {
+ return p.VisibleVersionReq != nil
+}
+
+func (p *TAgentTaskRequest) IsSetCleanUdfCacheReq() bool {
+ return p.CleanUdfCacheReq != nil
+}
+
+func (p *TAgentTaskRequest) IsSetCalcDeleteBitmapReq() bool {
+ return p.CalcDeleteBitmapReq != nil
+}
+
func (p *TAgentTaskRequest) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -22178,10 +25645,8 @@ func (p *TAgentTaskRequest) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetProtocolVersion = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -22189,10 +25654,8 @@ func (p *TAgentTaskRequest) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTaskType = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
@@ -22200,307 +25663,278 @@ func (p *TAgentTaskRequest) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSignature = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I32 {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField8(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField9(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField10(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 11:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField11(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 12:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField12(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 13:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField13(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 14:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField14(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 15:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField15(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 16:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField16(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 17:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField17(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 18:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField18(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 19:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField19(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 20:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField20(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 21:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField21(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 22:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField22(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 23:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField23(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 24:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField24(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 25:
if fieldTypeId == thrift.I64 {
if err = p.ReadField25(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 26:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField26(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 27:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField27(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 28:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField28(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 30:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField30(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 31:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField31(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 32:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField32(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 33:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField33(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 34:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField34(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 35:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField35(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 36:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField36(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 1000:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1000(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -22542,264 +25976,307 @@ RequiredFieldNotSetError:
}
func (p *TAgentTaskRequest) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field TAgentServiceVersion
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.ProtocolVersion = TAgentServiceVersion(v)
+ _field = TAgentServiceVersion(v)
}
+ p.ProtocolVersion = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field types.TTaskType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.TaskType = types.TTaskType(v)
+ _field = types.TTaskType(v)
}
+ p.TaskType = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.Signature = v
+ _field = v
}
+ p.Signature = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *types.TPriority
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := types.TPriority(v)
- p.Priority = &tmp
+ _field = &tmp
}
+ p.Priority = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField5(iprot thrift.TProtocol) error {
- p.CreateTabletReq = NewTCreateTabletReq()
- if err := p.CreateTabletReq.Read(iprot); err != nil {
+ _field := NewTCreateTabletReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.CreateTabletReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField6(iprot thrift.TProtocol) error {
- p.DropTabletReq = NewTDropTabletReq()
- if err := p.DropTabletReq.Read(iprot); err != nil {
+ _field := NewTDropTabletReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.DropTabletReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField7(iprot thrift.TProtocol) error {
- p.AlterTabletReq = NewTAlterTabletReq()
- if err := p.AlterTabletReq.Read(iprot); err != nil {
+ _field := NewTAlterTabletReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.AlterTabletReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField8(iprot thrift.TProtocol) error {
- p.CloneReq = NewTCloneReq()
- if err := p.CloneReq.Read(iprot); err != nil {
+ _field := NewTCloneReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.CloneReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField9(iprot thrift.TProtocol) error {
- p.PushReq = NewTPushReq()
- if err := p.PushReq.Read(iprot); err != nil {
+ _field := NewTPushReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.PushReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField10(iprot thrift.TProtocol) error {
- p.CancelDeleteDataReq = NewTCancelDeleteDataReq()
- if err := p.CancelDeleteDataReq.Read(iprot); err != nil {
+ _field := NewTCancelDeleteDataReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.CancelDeleteDataReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField11(iprot thrift.TProtocol) error {
- p.ResourceInfo = types.NewTResourceInfo()
- if err := p.ResourceInfo.Read(iprot); err != nil {
+ _field := types.NewTResourceInfo()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.ResourceInfo = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField12(iprot thrift.TProtocol) error {
- p.StorageMediumMigrateReq = NewTStorageMediumMigrateReq()
- if err := p.StorageMediumMigrateReq.Read(iprot); err != nil {
+ _field := NewTStorageMediumMigrateReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.StorageMediumMigrateReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField13(iprot thrift.TProtocol) error {
- p.CheckConsistencyReq = NewTCheckConsistencyReq()
- if err := p.CheckConsistencyReq.Read(iprot); err != nil {
+ _field := NewTCheckConsistencyReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.CheckConsistencyReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField14(iprot thrift.TProtocol) error {
- p.UploadReq = NewTUploadReq()
- if err := p.UploadReq.Read(iprot); err != nil {
+ _field := NewTUploadReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.UploadReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField15(iprot thrift.TProtocol) error {
- p.DownloadReq = NewTDownloadReq()
- if err := p.DownloadReq.Read(iprot); err != nil {
+ _field := NewTDownloadReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.DownloadReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField16(iprot thrift.TProtocol) error {
- p.SnapshotReq = NewTSnapshotRequest()
- if err := p.SnapshotReq.Read(iprot); err != nil {
+ _field := NewTSnapshotRequest()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.SnapshotReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField17(iprot thrift.TProtocol) error {
- p.ReleaseSnapshotReq = NewTReleaseSnapshotRequest()
- if err := p.ReleaseSnapshotReq.Read(iprot); err != nil {
+ _field := NewTReleaseSnapshotRequest()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.ReleaseSnapshotReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField18(iprot thrift.TProtocol) error {
- p.ClearRemoteFileReq = NewTClearRemoteFileReq()
- if err := p.ClearRemoteFileReq.Read(iprot); err != nil {
+ _field := NewTClearRemoteFileReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.ClearRemoteFileReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField19(iprot thrift.TProtocol) error {
- p.PublishVersionReq = NewTPublishVersionRequest()
- if err := p.PublishVersionReq.Read(iprot); err != nil {
+ _field := NewTPublishVersionRequest()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.PublishVersionReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField20(iprot thrift.TProtocol) error {
- p.ClearAlterTaskReq = NewTClearAlterTaskRequest()
- if err := p.ClearAlterTaskReq.Read(iprot); err != nil {
+ _field := NewTClearAlterTaskRequest()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.ClearAlterTaskReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField21(iprot thrift.TProtocol) error {
- p.ClearTransactionTaskReq = NewTClearTransactionTaskRequest()
- if err := p.ClearTransactionTaskReq.Read(iprot); err != nil {
+ _field := NewTClearTransactionTaskRequest()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.ClearTransactionTaskReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField22(iprot thrift.TProtocol) error {
- p.MoveDirReq = NewTMoveDirReq()
- if err := p.MoveDirReq.Read(iprot); err != nil {
+ _field := NewTMoveDirReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.MoveDirReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField23(iprot thrift.TProtocol) error {
- p.RecoverTabletReq = NewTRecoverTabletReq()
- if err := p.RecoverTabletReq.Read(iprot); err != nil {
+ _field := NewTRecoverTabletReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.RecoverTabletReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField24(iprot thrift.TProtocol) error {
- p.AlterTabletReqV2 = NewTAlterTabletReqV2()
- if err := p.AlterTabletReqV2.Read(iprot); err != nil {
+ _field := NewTAlterTabletReqV2()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.AlterTabletReqV2 = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField25(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.RecvTime = &v
+ _field = &v
}
+ p.RecvTime = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField26(iprot thrift.TProtocol) error {
- p.UpdateTabletMetaInfoReq = NewTUpdateTabletMetaInfoReq()
- if err := p.UpdateTabletMetaInfoReq.Read(iprot); err != nil {
+ _field := NewTUpdateTabletMetaInfoReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.UpdateTabletMetaInfoReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField27(iprot thrift.TProtocol) error {
- p.CompactionReq = NewTCompactionReq()
- if err := p.CompactionReq.Read(iprot); err != nil {
+ _field := NewTCompactionReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.CompactionReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField28(iprot thrift.TProtocol) error {
- p.StorageMigrationReqV2 = NewTStorageMigrationReqV2()
- if err := p.StorageMigrationReqV2.Read(iprot); err != nil {
+ _field := NewTStorageMigrationReqV2()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.StorageMigrationReqV2 = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField30(iprot thrift.TProtocol) error {
- p.PushCooldownConf = NewTPushCooldownConfReq()
- if err := p.PushCooldownConf.Read(iprot); err != nil {
+ _field := NewTPushCooldownConfReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.PushCooldownConf = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField31(iprot thrift.TProtocol) error {
- p.PushStoragePolicyReq = NewTPushStoragePolicyReq()
- if err := p.PushStoragePolicyReq.Read(iprot); err != nil {
+ _field := NewTPushStoragePolicyReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.PushStoragePolicyReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField32(iprot thrift.TProtocol) error {
- p.AlterInvertedIndexReq = NewTAlterInvertedIndexReq()
- if err := p.AlterInvertedIndexReq.Read(iprot); err != nil {
+ _field := NewTAlterInvertedIndexReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.AlterInvertedIndexReq = _field
return nil
}
-
func (p *TAgentTaskRequest) ReadField33(iprot thrift.TProtocol) error {
- p.GcBinlogReq = NewTGcBinlogReq()
- if err := p.GcBinlogReq.Read(iprot); err != nil {
+ _field := NewTGcBinlogReq()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.GcBinlogReq = _field
+ return nil
+}
+func (p *TAgentTaskRequest) ReadField34(iprot thrift.TProtocol) error {
+ _field := NewTCleanTrashReq()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.CleanTrashReq = _field
+ return nil
+}
+func (p *TAgentTaskRequest) ReadField35(iprot thrift.TProtocol) error {
+ _field := NewTVisibleVersionReq()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.VisibleVersionReq = _field
+ return nil
+}
+func (p *TAgentTaskRequest) ReadField36(iprot thrift.TProtocol) error {
+ _field := NewTCleanUDFCacheReq()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.CleanUdfCacheReq = _field
+ return nil
+}
+func (p *TAgentTaskRequest) ReadField1000(iprot thrift.TProtocol) error {
+ _field := NewTCalcDeleteBitmapRequest()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.CalcDeleteBitmapReq = _field
return nil
}
@@ -22937,7 +26414,22 @@ func (p *TAgentTaskRequest) Write(oprot thrift.TProtocol) (err error) {
fieldId = 33
goto WriteFieldError
}
-
+ if err = p.writeField34(oprot); err != nil {
+ fieldId = 34
+ goto WriteFieldError
+ }
+ if err = p.writeField35(oprot); err != nil {
+ fieldId = 35
+ goto WriteFieldError
+ }
+ if err = p.writeField36(oprot); err != nil {
+ fieldId = 36
+ goto WriteFieldError
+ }
+ if err = p.writeField1000(oprot); err != nil {
+ fieldId = 1000
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -23558,11 +27050,88 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 33 end error: ", p), err)
}
+func (p *TAgentTaskRequest) writeField34(oprot thrift.TProtocol) (err error) {
+ if p.IsSetCleanTrashReq() {
+ if err = oprot.WriteFieldBegin("clean_trash_req", thrift.STRUCT, 34); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.CleanTrashReq.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 34 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 34 end error: ", p), err)
+}
+
+func (p *TAgentTaskRequest) writeField35(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVisibleVersionReq() {
+ if err = oprot.WriteFieldBegin("visible_version_req", thrift.STRUCT, 35); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.VisibleVersionReq.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 35 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 35 end error: ", p), err)
+}
+
+func (p *TAgentTaskRequest) writeField36(oprot thrift.TProtocol) (err error) {
+ if p.IsSetCleanUdfCacheReq() {
+ if err = oprot.WriteFieldBegin("clean_udf_cache_req", thrift.STRUCT, 36); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.CleanUdfCacheReq.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 36 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 36 end error: ", p), err)
+}
+
+func (p *TAgentTaskRequest) writeField1000(oprot thrift.TProtocol) (err error) {
+ if p.IsSetCalcDeleteBitmapReq() {
+ if err = oprot.WriteFieldBegin("calc_delete_bitmap_req", thrift.STRUCT, 1000); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.CalcDeleteBitmapReq.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1000 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1000 end error: ", p), err)
+}
+
func (p *TAgentTaskRequest) String() string {
if p == nil {
return ""
}
return fmt.Sprintf("TAgentTaskRequest(%+v)", *p)
+
}
func (p *TAgentTaskRequest) DeepEqual(ano *TAgentTaskRequest) bool {
@@ -23667,6 +27236,18 @@ func (p *TAgentTaskRequest) DeepEqual(ano *TAgentTaskRequest) bool {
if !p.Field33DeepEqual(ano.GcBinlogReq) {
return false
}
+ if !p.Field34DeepEqual(ano.CleanTrashReq) {
+ return false
+ }
+ if !p.Field35DeepEqual(ano.VisibleVersionReq) {
+ return false
+ }
+ if !p.Field36DeepEqual(ano.CleanUdfCacheReq) {
+ return false
+ }
+ if !p.Field1000DeepEqual(ano.CalcDeleteBitmapReq) {
+ return false
+ }
return true
}
@@ -23904,6 +27485,34 @@ func (p *TAgentTaskRequest) Field33DeepEqual(src *TGcBinlogReq) bool {
}
return true
}
+func (p *TAgentTaskRequest) Field34DeepEqual(src *TCleanTrashReq) bool {
+
+ if !p.CleanTrashReq.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+func (p *TAgentTaskRequest) Field35DeepEqual(src *TVisibleVersionReq) bool {
+
+ if !p.VisibleVersionReq.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+func (p *TAgentTaskRequest) Field36DeepEqual(src *TCleanUDFCacheReq) bool {
+
+ if !p.CleanUdfCacheReq.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+func (p *TAgentTaskRequest) Field1000DeepEqual(src *TCalcDeleteBitmapRequest) bool {
+
+ if !p.CalcDeleteBitmapReq.DeepEqual(src) {
+ return false
+ }
+ return true
+}
type TAgentResult_ struct {
Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"`
@@ -23920,10 +27529,7 @@ func NewTAgentResult_() *TAgentResult_ {
}
func (p *TAgentResult_) InitDefault() {
- *p = TAgentResult_{
-
- SnapshotVersion: 1,
- }
+ p.SnapshotVersion = 1
}
var TAgentResult__Status_DEFAULT *status.TStatus
@@ -24023,47 +27629,38 @@ func (p *TAgentResult_) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetStatus = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRING {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I32 {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -24095,37 +27692,44 @@ RequiredFieldNotSetError:
}
func (p *TAgentResult_) ReadField1(iprot thrift.TProtocol) error {
- p.Status = status.NewTStatus()
- if err := p.Status.Read(iprot); err != nil {
+ _field := status.NewTStatus()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Status = _field
return nil
}
-
func (p *TAgentResult_) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.SnapshotPath = &v
+ _field = &v
}
+ p.SnapshotPath = _field
return nil
}
-
func (p *TAgentResult_) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.AllowIncrementalClone = &v
+ _field = &v
}
+ p.AllowIncrementalClone = _field
return nil
}
-
func (p *TAgentResult_) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SnapshotVersion = v
+ _field = v
}
+ p.SnapshotVersion = _field
return nil
}
@@ -24151,7 +27755,6 @@ func (p *TAgentResult_) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -24249,6 +27852,7 @@ func (p *TAgentResult_) String() string {
return ""
}
return fmt.Sprintf("TAgentResult_(%+v)", *p)
+
}
func (p *TAgentResult_) DeepEqual(ano *TAgentResult_) bool {
@@ -24323,7 +27927,6 @@ func NewTTopicItem() *TTopicItem {
}
func (p *TTopicItem) InitDefault() {
- *p = TTopicItem{}
}
func (p *TTopicItem) GetKey() (v string) {
@@ -24414,47 +28017,38 @@ func (p *TTopicItem) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetKey = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I64 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.DOUBLE {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.STRING {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -24486,38 +28080,47 @@ RequiredFieldNotSetError:
}
func (p *TTopicItem) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Key = v
+ _field = v
}
+ p.Key = _field
return nil
}
-
func (p *TTopicItem) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.IntValue = &v
+ _field = &v
}
+ p.IntValue = _field
return nil
}
-
func (p *TTopicItem) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *float64
if v, err := iprot.ReadDouble(); err != nil {
return err
} else {
- p.DoubleValue = &v
+ _field = &v
}
+ p.DoubleValue = _field
return nil
}
-
func (p *TTopicItem) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.StringValue = &v
+ _field = &v
}
+ p.StringValue = _field
return nil
}
@@ -24543,7 +28146,6 @@ func (p *TTopicItem) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -24641,6 +28243,7 @@ func (p *TTopicItem) String() string {
return ""
}
return fmt.Sprintf("TTopicItem(%+v)", *p)
+
}
func (p *TTopicItem) DeepEqual(ano *TTopicItem) bool {
@@ -24719,7 +28322,6 @@ func NewTTopicUpdate() *TTopicUpdate {
}
func (p *TTopicUpdate) InitDefault() {
- *p = TTopicUpdate{}
}
func (p *TTopicUpdate) GetType() (v TTopicType) {
@@ -24793,37 +28395,30 @@ func (p *TTopicUpdate) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetType = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.LIST {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.LIST {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -24855,41 +28450,47 @@ RequiredFieldNotSetError:
}
func (p *TTopicUpdate) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field TTopicType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.Type = TTopicType(v)
+ _field = TTopicType(v)
}
+ p.Type = _field
return nil
}
-
func (p *TTopicUpdate) ReadField2(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.Updates = make([]*TTopicItem, 0, size)
+ _field := make([]*TTopicItem, 0, size)
+ values := make([]TTopicItem, size)
for i := 0; i < size; i++ {
- _elem := NewTTopicItem()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.Updates = append(p.Updates, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Updates = _field
return nil
}
-
func (p *TTopicUpdate) ReadField3(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.Deletes = make([]string, 0, size)
+ _field := make([]string, 0, size)
for i := 0; i < size; i++ {
+
var _elem string
if v, err := iprot.ReadString(); err != nil {
return err
@@ -24897,11 +28498,12 @@ func (p *TTopicUpdate) ReadField3(iprot thrift.TProtocol) error {
_elem = v
}
- p.Deletes = append(p.Deletes, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Deletes = _field
return nil
}
@@ -24923,7 +28525,6 @@ func (p *TTopicUpdate) Write(oprot thrift.TProtocol) (err error) {
fieldId = 3
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -25018,6 +28619,7 @@ func (p *TTopicUpdate) String() string {
return ""
}
return fmt.Sprintf("TTopicUpdate(%+v)", *p)
+
}
func (p *TTopicUpdate) DeepEqual(ano *TTopicUpdate) bool {
@@ -25082,7 +28684,6 @@ func NewTAgentPublishRequest() *TAgentPublishRequest {
}
func (p *TAgentPublishRequest) InitDefault() {
- *p = TAgentPublishRequest{}
}
func (p *TAgentPublishRequest) GetProtocolVersion() (v TAgentServiceVersion) {
@@ -25131,10 +28732,8 @@ func (p *TAgentPublishRequest) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetProtocolVersion = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.LIST {
@@ -25142,17 +28741,14 @@ func (p *TAgentPublishRequest) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetUpdates = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -25189,31 +28785,37 @@ RequiredFieldNotSetError:
}
func (p *TAgentPublishRequest) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field TAgentServiceVersion
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.ProtocolVersion = TAgentServiceVersion(v)
+ _field = TAgentServiceVersion(v)
}
+ p.ProtocolVersion = _field
return nil
}
-
func (p *TAgentPublishRequest) ReadField2(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.Updates = make([]*TTopicUpdate, 0, size)
+ _field := make([]*TTopicUpdate, 0, size)
+ values := make([]TTopicUpdate, size)
for i := 0; i < size; i++ {
- _elem := NewTTopicUpdate()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.Updates = append(p.Updates, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Updates = _field
return nil
}
@@ -25231,7 +28833,6 @@ func (p *TAgentPublishRequest) Write(oprot thrift.TProtocol) (err error) {
fieldId = 2
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -25297,6 +28898,7 @@ func (p *TAgentPublishRequest) String() string {
return ""
}
return fmt.Sprintf("TAgentPublishRequest(%+v)", *p)
+
}
func (p *TAgentPublishRequest) DeepEqual(ano *TAgentPublishRequest) bool {
diff --git a/pkg/rpc/kitex_gen/agentservice/k-AgentService.go b/pkg/rpc/kitex_gen/agentservice/k-AgentService.go
index 6f4cbac9..667cffd1 100644
--- a/pkg/rpc/kitex_gen/agentservice/k-AgentService.go
+++ b/pkg/rpc/kitex_gen/agentservice/k-AgentService.go
@@ -1,4 +1,4 @@
-// Code generated by Kitex v0.4.4. DO NOT EDIT.
+// Code generated by Kitex v0.8.0. DO NOT EDIT.
package agentservice
@@ -11,6 +11,7 @@ import (
"github.com/apache/thrift/lib/go/thrift"
"github.com/cloudwego/kitex/pkg/protocol/bthrift"
+
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/descriptors"
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/exprs"
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/palointernalservice"
@@ -319,6 +320,76 @@ func (p *TTabletSchema) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
+ case 19:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField19(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 20:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField20(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 21:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField21(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 22:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField22(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 23:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField23(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
default:
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -655,6 +726,108 @@ func (p *TTabletSchema) FastReadField18(buf []byte) (int, error) {
return offset, nil
}
+func (p *TTabletSchema) FastReadField19(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.ClusterKeyIdxes = make([]int32, 0, size)
+ for i := 0; i < size; i++ {
+ var _elem int32
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _elem = v
+
+ }
+
+ p.ClusterKeyIdxes = append(p.ClusterKeyIdxes, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+func (p *TTabletSchema) FastReadField20(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.RowStoreColCids = make([]int32, 0, size)
+ for i := 0; i < size; i++ {
+ var _elem int32
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _elem = v
+
+ }
+
+ p.RowStoreColCids = append(p.RowStoreColCids, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+func (p *TTabletSchema) FastReadField21(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.RowStorePageSize = v
+
+ }
+ return offset, nil
+}
+
+func (p *TTabletSchema) FastReadField22(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.VariantEnableFlattenNested = v
+
+ }
+ return offset, nil
+}
+
+func (p *TTabletSchema) FastReadField23(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.StoragePageSize = v
+
+ }
+ return offset, nil
+}
+
// for compatibility
func (p *TTabletSchema) FastWrite(buf []byte) int {
return 0
@@ -677,11 +850,16 @@ func (p *TTabletSchema) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryW
offset += p.fastWriteField16(buf[offset:], binaryWriter)
offset += p.fastWriteField17(buf[offset:], binaryWriter)
offset += p.fastWriteField18(buf[offset:], binaryWriter)
+ offset += p.fastWriteField21(buf[offset:], binaryWriter)
+ offset += p.fastWriteField22(buf[offset:], binaryWriter)
+ offset += p.fastWriteField23(buf[offset:], binaryWriter)
offset += p.fastWriteField3(buf[offset:], binaryWriter)
offset += p.fastWriteField4(buf[offset:], binaryWriter)
offset += p.fastWriteField5(buf[offset:], binaryWriter)
offset += p.fastWriteField7(buf[offset:], binaryWriter)
offset += p.fastWriteField11(buf[offset:], binaryWriter)
+ offset += p.fastWriteField19(buf[offset:], binaryWriter)
+ offset += p.fastWriteField20(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
@@ -710,6 +888,11 @@ func (p *TTabletSchema) BLength() int {
l += p.field16Length()
l += p.field17Length()
l += p.field18Length()
+ l += p.field19Length()
+ l += p.field20Length()
+ l += p.field21Length()
+ l += p.field22Length()
+ l += p.field23Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
@@ -918,6 +1101,77 @@ func (p *TTabletSchema) fastWriteField18(buf []byte, binaryWriter bthrift.Binary
return offset
}
+func (p *TTabletSchema) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetClusterKeyIdxes() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cluster_key_idxes", thrift.LIST, 19)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.I32, 0)
+ var length int
+ for _, v := range p.ClusterKeyIdxes {
+ length++
+ offset += bthrift.Binary.WriteI32(buf[offset:], v)
+
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TTabletSchema) fastWriteField20(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetRowStoreColCids() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "row_store_col_cids", thrift.LIST, 20)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.I32, 0)
+ var length int
+ for _, v := range p.RowStoreColCids {
+ length++
+ offset += bthrift.Binary.WriteI32(buf[offset:], v)
+
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I32, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TTabletSchema) fastWriteField21(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetRowStorePageSize() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "row_store_page_size", thrift.I64, 21)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.RowStorePageSize)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TTabletSchema) fastWriteField22(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetVariantEnableFlattenNested() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "variant_enable_flatten_nested", thrift.BOOL, 22)
+ offset += bthrift.Binary.WriteBool(buf[offset:], p.VariantEnableFlattenNested)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TTabletSchema) fastWriteField23(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetStoragePageSize() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "storage_page_size", thrift.I64, 23)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.StoragePageSize)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
func (p *TTabletSchema) field1Length() int {
l := 0
l += bthrift.Binary.FieldBeginLength("short_key_column_count", thrift.I16, 1)
@@ -1112,6 +1366,65 @@ func (p *TTabletSchema) field18Length() int {
return l
}
+func (p *TTabletSchema) field19Length() int {
+ l := 0
+ if p.IsSetClusterKeyIdxes() {
+ l += bthrift.Binary.FieldBeginLength("cluster_key_idxes", thrift.LIST, 19)
+ l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.ClusterKeyIdxes))
+ var tmpV int32
+ l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.ClusterKeyIdxes)
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TTabletSchema) field20Length() int {
+ l := 0
+ if p.IsSetRowStoreColCids() {
+ l += bthrift.Binary.FieldBeginLength("row_store_col_cids", thrift.LIST, 20)
+ l += bthrift.Binary.ListBeginLength(thrift.I32, len(p.RowStoreColCids))
+ var tmpV int32
+ l += bthrift.Binary.I32Length(int32(tmpV)) * len(p.RowStoreColCids)
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TTabletSchema) field21Length() int {
+ l := 0
+ if p.IsSetRowStorePageSize() {
+ l += bthrift.Binary.FieldBeginLength("row_store_page_size", thrift.I64, 21)
+ l += bthrift.Binary.I64Length(p.RowStorePageSize)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TTabletSchema) field22Length() int {
+ l := 0
+ if p.IsSetVariantEnableFlattenNested() {
+ l += bthrift.Binary.FieldBeginLength("variant_enable_flatten_nested", thrift.BOOL, 22)
+ l += bthrift.Binary.BoolLength(p.VariantEnableFlattenNested)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TTabletSchema) field23Length() int {
+ l := 0
+ if p.IsSetStoragePageSize() {
+ l += bthrift.Binary.FieldBeginLength("storage_page_size", thrift.I64, 23)
+ l += bthrift.Binary.I64Length(p.StoragePageSize)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
func (p *TS3StorageParam) FastRead(buf []byte) (int, error) {
var err error
var offset int
@@ -1274,15 +1587,43 @@ func (p *TS3StorageParam) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
- default:
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
- }
-
- l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ case 11:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField11(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 12:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField12(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
offset += l
if err != nil {
goto ReadFieldEndError
@@ -1443,6 +1784,34 @@ func (p *TS3StorageParam) FastReadField10(buf []byte) (int, error) {
return offset, nil
}
+func (p *TS3StorageParam) FastReadField11(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.Token = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TS3StorageParam) FastReadField12(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ tmp := TObjStorageType(v)
+ p.Provider = &tmp
+
+ }
+ return offset, nil
+}
+
// for compatibility
func (p *TS3StorageParam) FastWrite(buf []byte) int {
return 0
@@ -1462,6 +1831,8 @@ func (p *TS3StorageParam) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binar
offset += p.fastWriteField4(buf[offset:], binaryWriter)
offset += p.fastWriteField8(buf[offset:], binaryWriter)
offset += p.fastWriteField9(buf[offset:], binaryWriter)
+ offset += p.fastWriteField11(buf[offset:], binaryWriter)
+ offset += p.fastWriteField12(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
@@ -1482,6 +1853,8 @@ func (p *TS3StorageParam) BLength() int {
l += p.field8Length()
l += p.field9Length()
l += p.field10Length()
+ l += p.field11Length()
+ l += p.field12Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
@@ -1598,6 +1971,28 @@ func (p *TS3StorageParam) fastWriteField10(buf []byte, binaryWriter bthrift.Bina
return offset
}
+func (p *TS3StorageParam) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetToken() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "token", thrift.STRING, 11)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Token)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TS3StorageParam) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetProvider() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "provider", thrift.I32, 12)
+ offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Provider))
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
func (p *TS3StorageParam) field1Length() int {
l := 0
if p.IsSetEndpoint() {
@@ -1708,6 +2103,28 @@ func (p *TS3StorageParam) field10Length() int {
return l
}
+func (p *TS3StorageParam) field11Length() int {
+ l := 0
+ if p.IsSetToken() {
+ l += bthrift.Binary.FieldBeginLength("token", thrift.STRING, 11)
+ l += bthrift.Binary.StringLengthNocopy(*p.Token)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TS3StorageParam) field12Length() int {
+ l := 0
+ if p.IsSetProvider() {
+ l += bthrift.Binary.FieldBeginLength("provider", thrift.I32, 12)
+ l += bthrift.Binary.I32Length(int32(*p.Provider))
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
func (p *TStoragePolicy) FastRead(buf []byte) (int, error) {
var err error
var offset int
@@ -2174,6 +2591,20 @@ func (p *TStorageResource) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
+ case 5:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField5(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
default:
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -2261,6 +2692,19 @@ func (p *TStorageResource) FastReadField4(buf []byte) (int, error) {
return offset, nil
}
+func (p *TStorageResource) FastReadField5(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := plannodes.NewTHdfsParams()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.HdfsStorageParam = tmp
+ return offset, nil
+}
+
// for compatibility
func (p *TStorageResource) FastWrite(buf []byte) int {
return 0
@@ -2274,6 +2718,7 @@ func (p *TStorageResource) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina
offset += p.fastWriteField3(buf[offset:], binaryWriter)
offset += p.fastWriteField2(buf[offset:], binaryWriter)
offset += p.fastWriteField4(buf[offset:], binaryWriter)
+ offset += p.fastWriteField5(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
@@ -2288,6 +2733,7 @@ func (p *TStorageResource) BLength() int {
l += p.field2Length()
l += p.field3Length()
l += p.field4Length()
+ l += p.field5Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
@@ -2337,6 +2783,16 @@ func (p *TStorageResource) fastWriteField4(buf []byte, binaryWriter bthrift.Bina
return offset
}
+func (p *TStorageResource) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetHdfsStorageParam() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hdfs_storage_param", thrift.STRUCT, 5)
+ offset += p.HdfsStorageParam.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
func (p *TStorageResource) field1Length() int {
l := 0
if p.IsSetId() {
@@ -2380,6 +2836,16 @@ func (p *TStorageResource) field4Length() int {
return l
}
+func (p *TStorageResource) field5Length() int {
+ l := 0
+ if p.IsSetHdfsStorageParam() {
+ l += bthrift.Binary.FieldBeginLength("hdfs_storage_param", thrift.STRUCT, 5)
+ l += p.HdfsStorageParam.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
func (p *TPushStoragePolicyReq) FastRead(buf []byte) (int, error) {
var err error
var offset int
@@ -2690,7 +3156,84 @@ func (p *TPushStoragePolicyReq) field3Length() int {
return l
}
-func (p *TBinlogConfig) FastRead(buf []byte) (int, error) {
+func (p *TCleanTrashReq) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+// for compatibility
+func (p *TCleanTrashReq) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TCleanTrashReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCleanTrashReq")
+ if p != nil {
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TCleanTrashReq) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TCleanTrashReq")
+ if p != nil {
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TCleanUDFCacheReq) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -2713,7 +3256,7 @@ func (p *TBinlogConfig) FastRead(buf []byte) (int, error) {
}
switch fieldId {
case 1:
- if fieldTypeId == thrift.BOOL {
+ if fieldTypeId == thrift.STRING {
l, err = p.FastReadField1(buf[offset:])
offset += l
if err != nil {
@@ -2726,48 +3269,6 @@ func (p *TBinlogConfig) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
- case 2:
- if fieldTypeId == thrift.I64 {
- l, err = p.FastReadField2(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldError
- }
- } else {
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
- }
- case 3:
- if fieldTypeId == thrift.I64 {
- l, err = p.FastReadField3(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldError
- }
- } else {
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
- }
- case 4:
- if fieldTypeId == thrift.I64 {
- l, err = p.FastReadField4(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldError
- }
- } else {
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
- }
default:
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -2794,7 +3295,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBinlogConfig[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCleanUDFCacheReq[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -2803,24 +3304,199 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *TBinlogConfig) FastReadField1(buf []byte) (int, error) {
+func (p *TCleanUDFCacheReq) FastReadField1(buf []byte) (int, error) {
offset := 0
- if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
- p.Enable = &v
+ p.FunctionSignature = &v
}
return offset, nil
}
-func (p *TBinlogConfig) FastReadField2(buf []byte) (int, error) {
- offset := 0
+// for compatibility
+func (p *TCleanUDFCacheReq) FastWrite(buf []byte) int {
+ return 0
+}
- if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
- return offset, err
+func (p *TCleanUDFCacheReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCleanUDFCacheReq")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TCleanUDFCacheReq) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TCleanUDFCacheReq")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TCleanUDFCacheReq) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetFunctionSignature() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "function_signature", thrift.STRING, 1)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.FunctionSignature)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TCleanUDFCacheReq) field1Length() int {
+ l := 0
+ if p.IsSetFunctionSignature() {
+ l += bthrift.Binary.FieldBeginLength("function_signature", thrift.STRING, 1)
+ l += bthrift.Binary.StringLengthNocopy(*p.FunctionSignature)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TBinlogConfig) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField3(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField4(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBinlogConfig[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TBinlogConfig) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.Enable = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TBinlogConfig) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
} else {
offset += l
p.TtlSeconds = &v
@@ -3324,6 +4000,90 @@ func (p *TCreateTabletReq) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
+ case 26:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField26(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 27:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField27(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 28:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField28(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 29:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField29(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 1000:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField1000(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 1001:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField1001(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
default:
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -3683,14 +4443,98 @@ func (p *TCreateTabletReq) FastReadField25(buf []byte) (int, error) {
return offset, nil
}
-// for compatibility
-func (p *TCreateTabletReq) FastWrite(buf []byte) int {
- return 0
+func (p *TCreateTabletReq) FastReadField26(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.TimeSeriesCompactionEmptyRowsetsThreshold = v
+
+ }
+ return offset, nil
}
-func (p *TCreateTabletReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TCreateTabletReq) FastReadField27(buf []byte) (int, error) {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCreateTabletReq")
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.TimeSeriesCompactionLevelThreshold = v
+
+ }
+ return offset, nil
+}
+
+func (p *TCreateTabletReq) FastReadField28(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.InvertedIndexStorageFormat = TInvertedIndexStorageFormat(v)
+
+ }
+ return offset, nil
+}
+
+func (p *TCreateTabletReq) FastReadField29(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.InvertedIndexFileStorageFormat = types.TInvertedIndexFileStorageFormat(v)
+
+ }
+ return offset, nil
+}
+
+func (p *TCreateTabletReq) FastReadField1000(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.IsInMemory = v
+
+ }
+ return offset, nil
+}
+
+func (p *TCreateTabletReq) FastReadField1001(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.IsPersistent = v
+
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TCreateTabletReq) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TCreateTabletReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCreateTabletReq")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
offset += p.fastWriteField3(buf[offset:], binaryWriter)
@@ -3708,6 +4552,10 @@ func (p *TCreateTabletReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina
offset += p.fastWriteField23(buf[offset:], binaryWriter)
offset += p.fastWriteField24(buf[offset:], binaryWriter)
offset += p.fastWriteField25(buf[offset:], binaryWriter)
+ offset += p.fastWriteField26(buf[offset:], binaryWriter)
+ offset += p.fastWriteField27(buf[offset:], binaryWriter)
+ offset += p.fastWriteField1000(buf[offset:], binaryWriter)
+ offset += p.fastWriteField1001(buf[offset:], binaryWriter)
offset += p.fastWriteField2(buf[offset:], binaryWriter)
offset += p.fastWriteField5(buf[offset:], binaryWriter)
offset += p.fastWriteField13(buf[offset:], binaryWriter)
@@ -3715,6 +4563,8 @@ func (p *TCreateTabletReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina
offset += p.fastWriteField16(buf[offset:], binaryWriter)
offset += p.fastWriteField21(buf[offset:], binaryWriter)
offset += p.fastWriteField22(buf[offset:], binaryWriter)
+ offset += p.fastWriteField28(buf[offset:], binaryWriter)
+ offset += p.fastWriteField29(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
@@ -3748,6 +4598,12 @@ func (p *TCreateTabletReq) BLength() int {
l += p.field23Length()
l += p.field24Length()
l += p.field25Length()
+ l += p.field26Length()
+ l += p.field27Length()
+ l += p.field28Length()
+ l += p.field29Length()
+ l += p.field1000Length()
+ l += p.field1001Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
@@ -4001,6 +4857,72 @@ func (p *TCreateTabletReq) fastWriteField25(buf []byte, binaryWriter bthrift.Bin
return offset
}
+func (p *TCreateTabletReq) fastWriteField26(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "time_series_compaction_empty_rowsets_threshold", thrift.I64, 26)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.TimeSeriesCompactionEmptyRowsetsThreshold)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TCreateTabletReq) fastWriteField27(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetTimeSeriesCompactionLevelThreshold() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "time_series_compaction_level_threshold", thrift.I64, 27)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.TimeSeriesCompactionLevelThreshold)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TCreateTabletReq) fastWriteField28(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetInvertedIndexStorageFormat() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "inverted_index_storage_format", thrift.I32, 28)
+ offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.InvertedIndexStorageFormat))
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TCreateTabletReq) fastWriteField29(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetInvertedIndexFileStorageFormat() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "inverted_index_file_storage_format", thrift.I32, 29)
+ offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.InvertedIndexFileStorageFormat))
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TCreateTabletReq) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetIsInMemory() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_in_memory", thrift.BOOL, 1000)
+ offset += bthrift.Binary.WriteBool(buf[offset:], p.IsInMemory)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TCreateTabletReq) fastWriteField1001(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetIsPersistent() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_persistent", thrift.BOOL, 1001)
+ offset += bthrift.Binary.WriteBool(buf[offset:], p.IsPersistent)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
func (p *TCreateTabletReq) field1Length() int {
l := 0
l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1)
@@ -4248,6 +5170,72 @@ func (p *TCreateTabletReq) field25Length() int {
return l
}
+func (p *TCreateTabletReq) field26Length() int {
+ l := 0
+ if p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() {
+ l += bthrift.Binary.FieldBeginLength("time_series_compaction_empty_rowsets_threshold", thrift.I64, 26)
+ l += bthrift.Binary.I64Length(p.TimeSeriesCompactionEmptyRowsetsThreshold)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TCreateTabletReq) field27Length() int {
+ l := 0
+ if p.IsSetTimeSeriesCompactionLevelThreshold() {
+ l += bthrift.Binary.FieldBeginLength("time_series_compaction_level_threshold", thrift.I64, 27)
+ l += bthrift.Binary.I64Length(p.TimeSeriesCompactionLevelThreshold)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TCreateTabletReq) field28Length() int {
+ l := 0
+ if p.IsSetInvertedIndexStorageFormat() {
+ l += bthrift.Binary.FieldBeginLength("inverted_index_storage_format", thrift.I32, 28)
+ l += bthrift.Binary.I32Length(int32(p.InvertedIndexStorageFormat))
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TCreateTabletReq) field29Length() int {
+ l := 0
+ if p.IsSetInvertedIndexFileStorageFormat() {
+ l += bthrift.Binary.FieldBeginLength("inverted_index_file_storage_format", thrift.I32, 29)
+ l += bthrift.Binary.I32Length(int32(p.InvertedIndexFileStorageFormat))
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TCreateTabletReq) field1000Length() int {
+ l := 0
+ if p.IsSetIsInMemory() {
+ l += bthrift.Binary.FieldBeginLength("is_in_memory", thrift.BOOL, 1000)
+ l += bthrift.Binary.BoolLength(p.IsInMemory)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TCreateTabletReq) field1001Length() int {
+ l := 0
+ if p.IsSetIsPersistent() {
+ l += bthrift.Binary.FieldBeginLength("is_persistent", thrift.BOOL, 1001)
+ l += bthrift.Binary.BoolLength(p.IsPersistent)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
func (p *TDropTabletReq) FastRead(buf []byte) (int, error) {
var err error
var offset int
@@ -5208,6 +6196,48 @@ func (p *TAlterTabletReqV2) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
+ case 1000:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField1000(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 1001:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField1001(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 1002:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField1002(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
default:
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -5441,6 +6471,45 @@ func (p *TAlterTabletReqV2) FastReadField11(buf []byte) (int, error) {
return offset, nil
}
+func (p *TAlterTabletReqV2) FastReadField1000(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.JobId = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TAlterTabletReqV2) FastReadField1001(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.Expiration = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TAlterTabletReqV2) FastReadField1002(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.StorageVaultId = &v
+
+ }
+ return offset, nil
+}
+
// for compatibility
func (p *TAlterTabletReqV2) FastWrite(buf []byte) int {
return 0
@@ -5457,10 +6526,13 @@ func (p *TAlterTabletReqV2) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bin
offset += p.fastWriteField5(buf[offset:], binaryWriter)
offset += p.fastWriteField6(buf[offset:], binaryWriter)
offset += p.fastWriteField11(buf[offset:], binaryWriter)
+ offset += p.fastWriteField1000(buf[offset:], binaryWriter)
+ offset += p.fastWriteField1001(buf[offset:], binaryWriter)
offset += p.fastWriteField7(buf[offset:], binaryWriter)
offset += p.fastWriteField8(buf[offset:], binaryWriter)
offset += p.fastWriteField9(buf[offset:], binaryWriter)
offset += p.fastWriteField10(buf[offset:], binaryWriter)
+ offset += p.fastWriteField1002(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
@@ -5482,6 +6554,9 @@ func (p *TAlterTabletReqV2) BLength() int {
l += p.field9Length()
l += p.field10Length()
l += p.field11Length()
+ l += p.field1000Length()
+ l += p.field1001Length()
+ l += p.field1002Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
@@ -5614,6 +6689,39 @@ func (p *TAlterTabletReqV2) fastWriteField11(buf []byte, binaryWriter bthrift.Bi
return offset
}
+func (p *TAlterTabletReqV2) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetJobId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_id", thrift.I64, 1000)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.JobId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TAlterTabletReqV2) fastWriteField1001(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetExpiration() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "expiration", thrift.I64, 1001)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.Expiration)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TAlterTabletReqV2) fastWriteField1002(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetStorageVaultId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "storage_vault_id", thrift.STRING, 1002)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.StorageVaultId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
func (p *TAlterTabletReqV2) field1Length() int {
l := 0
l += bthrift.Binary.FieldBeginLength("base_tablet_id", thrift.I64, 1)
@@ -5732,7 +6840,40 @@ func (p *TAlterTabletReqV2) field11Length() int {
return l
}
-func (p *TAlterInvertedIndexReq) FastRead(buf []byte) (int, error) {
+func (p *TAlterTabletReqV2) field1000Length() int {
+ l := 0
+ if p.IsSetJobId() {
+ l += bthrift.Binary.FieldBeginLength("job_id", thrift.I64, 1000)
+ l += bthrift.Binary.I64Length(*p.JobId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TAlterTabletReqV2) field1001Length() int {
+ l := 0
+ if p.IsSetExpiration() {
+ l += bthrift.Binary.FieldBeginLength("expiration", thrift.I64, 1001)
+ l += bthrift.Binary.I64Length(*p.Expiration)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TAlterTabletReqV2) field1002Length() int {
+ l := 0
+ if p.IsSetStorageVaultId() {
+ l += bthrift.Binary.FieldBeginLength("storage_vault_id", thrift.STRING, 1002)
+ l += bthrift.Binary.StringLengthNocopy(*p.StorageVaultId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TAlterInvertedIndexReq) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -7536,6 +8677,34 @@ func (p *TPushReq) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
+ case 17:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField17(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 18:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField18(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
default:
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -7844,6 +9013,32 @@ func (p *TPushReq) FastReadField16(buf []byte) (int, error) {
return offset, nil
}
+func (p *TPushReq) FastReadField17(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.StorageVaultId = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TPushReq) FastReadField18(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.SchemaVersion = &v
+
+ }
+ return offset, nil
+}
+
// for compatibility
func (p *TPushReq) FastWrite(buf []byte) int {
return 0
@@ -7863,12 +9058,14 @@ func (p *TPushReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter
offset += p.fastWriteField11(buf[offset:], binaryWriter)
offset += p.fastWriteField12(buf[offset:], binaryWriter)
offset += p.fastWriteField13(buf[offset:], binaryWriter)
+ offset += p.fastWriteField18(buf[offset:], binaryWriter)
offset += p.fastWriteField6(buf[offset:], binaryWriter)
offset += p.fastWriteField7(buf[offset:], binaryWriter)
offset += p.fastWriteField9(buf[offset:], binaryWriter)
offset += p.fastWriteField14(buf[offset:], binaryWriter)
offset += p.fastWriteField15(buf[offset:], binaryWriter)
offset += p.fastWriteField16(buf[offset:], binaryWriter)
+ offset += p.fastWriteField17(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
@@ -7895,6 +9092,8 @@ func (p *TPushReq) BLength() int {
l += p.field14Length()
l += p.field15Length()
l += p.field16Length()
+ l += p.field17Length()
+ l += p.field18Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
@@ -8077,6 +9276,28 @@ func (p *TPushReq) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWrite
return offset
}
+func (p *TPushReq) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetStorageVaultId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "storage_vault_id", thrift.STRING, 17)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.StorageVaultId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TPushReq) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetSchemaVersion() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "schema_version", thrift.I32, 18)
+ offset += bthrift.Binary.WriteI32(buf[offset:], *p.SchemaVersion)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
func (p *TPushReq) field1Length() int {
l := 0
l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1)
@@ -8245,6 +9466,28 @@ func (p *TPushReq) field16Length() int {
return l
}
+func (p *TPushReq) field17Length() int {
+ l := 0
+ if p.IsSetStorageVaultId() {
+ l += bthrift.Binary.FieldBeginLength("storage_vault_id", thrift.STRING, 17)
+ l += bthrift.Binary.StringLengthNocopy(*p.StorageVaultId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TPushReq) field18Length() int {
+ l := 0
+ if p.IsSetSchemaVersion() {
+ l += bthrift.Binary.FieldBeginLength("schema_version", thrift.I32, 18)
+ l += bthrift.Binary.I32Length(*p.SchemaVersion)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
func (p *TCloneReq) FastRead(buf []byte) (int, error) {
var err error
var offset int
@@ -8441,6 +9684,20 @@ func (p *TCloneReq) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
+ case 13:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField13(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
default:
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -8569,7 +9826,7 @@ func (p *TCloneReq) FastReadField5(buf []byte) (int, error) {
return offset, err
} else {
offset += l
- p.CommittedVersion = &v
+ p.Version = &v
}
return offset, nil
@@ -8667,6 +9924,20 @@ func (p *TCloneReq) FastReadField12(buf []byte) (int, error) {
return offset, nil
}
+func (p *TCloneReq) FastReadField13(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.TableId = v
+
+ }
+ return offset, nil
+}
+
// for compatibility
func (p *TCloneReq) FastWrite(buf []byte) int {
return 0
@@ -8686,6 +9957,7 @@ func (p *TCloneReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWrite
offset += p.fastWriteField10(buf[offset:], binaryWriter)
offset += p.fastWriteField11(buf[offset:], binaryWriter)
offset += p.fastWriteField12(buf[offset:], binaryWriter)
+ offset += p.fastWriteField13(buf[offset:], binaryWriter)
offset += p.fastWriteField3(buf[offset:], binaryWriter)
offset += p.fastWriteField4(buf[offset:], binaryWriter)
}
@@ -8710,6 +9982,7 @@ func (p *TCloneReq) BLength() int {
l += p.field10Length()
l += p.field11Length()
l += p.field12Length()
+ l += p.field13Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
@@ -8763,9 +10036,9 @@ func (p *TCloneReq) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWrite
func (p *TCloneReq) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- if p.IsSetCommittedVersion() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "committed_version", thrift.I64, 5)
- offset += bthrift.Binary.WriteI64(buf[offset:], *p.CommittedVersion)
+ if p.IsSetVersion() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.I64, 5)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.Version)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
}
@@ -8849,6 +10122,17 @@ func (p *TCloneReq) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWrit
return offset
}
+func (p *TCloneReq) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetTableId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 13)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.TableId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
func (p *TCloneReq) field1Length() int {
l := 0
l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1)
@@ -8892,9 +10176,9 @@ func (p *TCloneReq) field4Length() int {
func (p *TCloneReq) field5Length() int {
l := 0
- if p.IsSetCommittedVersion() {
- l += bthrift.Binary.FieldBeginLength("committed_version", thrift.I64, 5)
- l += bthrift.Binary.I64Length(*p.CommittedVersion)
+ if p.IsSetVersion() {
+ l += bthrift.Binary.FieldBeginLength("version", thrift.I64, 5)
+ l += bthrift.Binary.I64Length(*p.Version)
l += bthrift.Binary.FieldEndLength()
}
@@ -8978,6 +10262,17 @@ func (p *TCloneReq) field12Length() int {
return l
}
+func (p *TCloneReq) field13Length() int {
+ l := 0
+ if p.IsSetTableId() {
+ l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 13)
+ l += bthrift.Binary.I64Length(p.TableId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
func (p *TCompactionReq) FastRead(buf []byte) (int, error) {
var err error
var offset int
@@ -11814,6 +13109,20 @@ func (p *TSnapshotRequest) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
+ case 14:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField14(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
default:
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -12049,6 +13358,19 @@ func (p *TSnapshotRequest) FastReadField13(buf []byte) (int, error) {
return offset, nil
}
+func (p *TSnapshotRequest) FastReadField14(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.RefTabletId = &v
+
+ }
+ return offset, nil
+}
+
// for compatibility
func (p *TSnapshotRequest) FastWrite(buf []byte) int {
return 0
@@ -12070,6 +13392,7 @@ func (p *TSnapshotRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina
offset += p.fastWriteField11(buf[offset:], binaryWriter)
offset += p.fastWriteField12(buf[offset:], binaryWriter)
offset += p.fastWriteField13(buf[offset:], binaryWriter)
+ offset += p.fastWriteField14(buf[offset:], binaryWriter)
offset += p.fastWriteField6(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
@@ -12094,6 +13417,7 @@ func (p *TSnapshotRequest) BLength() int {
l += p.field11Length()
l += p.field12Length()
l += p.field13Length()
+ l += p.field14Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
@@ -12247,6 +13571,17 @@ func (p *TSnapshotRequest) fastWriteField13(buf []byte, binaryWriter bthrift.Bin
return offset
}
+func (p *TSnapshotRequest) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetRefTabletId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ref_tablet_id", thrift.I64, 14)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.RefTabletId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
func (p *TSnapshotRequest) field1Length() int {
l := 0
l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1)
@@ -12388,6 +13723,17 @@ func (p *TSnapshotRequest) field13Length() int {
return l
}
+func (p *TSnapshotRequest) field14Length() int {
+ l := 0
+ if p.IsSetRefTabletId() {
+ l += bthrift.Binary.FieldBeginLength("ref_tablet_id", thrift.I64, 14)
+ l += bthrift.Binary.I64Length(*p.RefTabletId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
func (p *TReleaseSnapshotRequest) FastRead(buf []byte) (int, error) {
var err error
var offset int
@@ -13233,150 +14579,1286 @@ func (p *TMoveDirReq) FastReadField4(buf []byte) (int, error) {
func (p *TMoveDirReq) FastReadField5(buf []byte) (int, error) {
offset := 0
- if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.Overwrite = v
+
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TMoveDirReq) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TMoveDirReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMoveDirReq")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField4(buf[offset:], binaryWriter)
+ offset += p.fastWriteField5(buf[offset:], binaryWriter)
+ offset += p.fastWriteField3(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TMoveDirReq) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TMoveDirReq")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ l += p.field3Length()
+ l += p.field4Length()
+ l += p.field5Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TMoveDirReq) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_id", thrift.I64, 1)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.TabletId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TMoveDirReq) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "schema_hash", thrift.I32, 2)
+ offset += bthrift.Binary.WriteI32(buf[offset:], p.SchemaHash)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TMoveDirReq) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "src", thrift.STRING, 3)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Src)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TMoveDirReq) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_id", thrift.I64, 4)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.JobId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TMoveDirReq) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "overwrite", thrift.BOOL, 5)
+ offset += bthrift.Binary.WriteBool(buf[offset:], p.Overwrite)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TMoveDirReq) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1)
+ l += bthrift.Binary.I64Length(p.TabletId)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TMoveDirReq) field2Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("schema_hash", thrift.I32, 2)
+ l += bthrift.Binary.I32Length(p.SchemaHash)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TMoveDirReq) field3Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("src", thrift.STRING, 3)
+ l += bthrift.Binary.StringLengthNocopy(p.Src)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TMoveDirReq) field4Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("job_id", thrift.I64, 4)
+ l += bthrift.Binary.I64Length(p.JobId)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TMoveDirReq) field5Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("overwrite", thrift.BOOL, 5)
+ l += bthrift.Binary.BoolLength(p.Overwrite)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TPublishVersionRequest) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetTransactionId bool = false
+ var issetPartitionVersionInfos bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetTransactionId = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetPartitionVersionInfos = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField3(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.SET {
+ l, err = p.FastReadField4(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetTransactionId {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetPartitionVersionInfos {
+ fieldId = 2
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishVersionRequest[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishVersionRequest[fieldId]))
+}
+
+func (p *TPublishVersionRequest) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.TransactionId = v
+
+ }
+ return offset, nil
+}
+
+func (p *TPublishVersionRequest) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.PartitionVersionInfos = make([]*TPartitionVersionInfo, 0, size)
+ for i := 0; i < size; i++ {
+ _elem := NewTPartitionVersionInfo()
+ if l, err := _elem.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+
+ p.PartitionVersionInfos = append(p.PartitionVersionInfos, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+func (p *TPublishVersionRequest) FastReadField3(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.StrictMode = v
+
+ }
+ return offset, nil
+}
+
+func (p *TPublishVersionRequest) FastReadField4(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadSetBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.BaseTabletIds = make([]types.TTabletId, 0, size)
+ for i := 0; i < size; i++ {
+ var _elem types.TTabletId
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _elem = v
+
+ }
+
+ p.BaseTabletIds = append(p.BaseTabletIds, _elem)
+ }
+ if l, err := bthrift.Binary.ReadSetEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TPublishVersionRequest) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TPublishVersionRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPublishVersionRequest")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField3(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField4(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TPublishVersionRequest) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TPublishVersionRequest")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ l += p.field3Length()
+ l += p.field4Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TPublishVersionRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "transaction_id", thrift.I64, 1)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.TransactionId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TPublishVersionRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_version_infos", thrift.LIST, 2)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
+ var length int
+ for _, v := range p.PartitionVersionInfos {
+ length++
+ offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TPublishVersionRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetStrictMode() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "strict_mode", thrift.BOOL, 3)
+ offset += bthrift.Binary.WriteBool(buf[offset:], p.StrictMode)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TPublishVersionRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetBaseTabletIds() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "base_tablet_ids", thrift.SET, 4)
+ setBeginOffset := offset
+ offset += bthrift.Binary.SetBeginLength(thrift.I64, 0)
+
+ for i := 0; i < len(p.BaseTabletIds); i++ {
+ for j := i + 1; j < len(p.BaseTabletIds); j++ {
+ if func(tgt, src types.TTabletId) bool {
+ if tgt != src {
+ return false
+ }
+ return true
+ }(p.BaseTabletIds[i], p.BaseTabletIds[j]) {
+ panic(fmt.Errorf("%T error writing set field: slice is not unique", p.BaseTabletIds[i]))
+ }
+ }
+ }
+ var length int
+ for _, v := range p.BaseTabletIds {
+ length++
+ offset += bthrift.Binary.WriteI64(buf[offset:], v)
+
+ }
+ bthrift.Binary.WriteSetBegin(buf[setBeginOffset:], thrift.I64, length)
+ offset += bthrift.Binary.WriteSetEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TPublishVersionRequest) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("transaction_id", thrift.I64, 1)
+ l += bthrift.Binary.I64Length(p.TransactionId)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TPublishVersionRequest) field2Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("partition_version_infos", thrift.LIST, 2)
+ l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.PartitionVersionInfos))
+ for _, v := range p.PartitionVersionInfos {
+ l += v.BLength()
+ }
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TPublishVersionRequest) field3Length() int {
+ l := 0
+ if p.IsSetStrictMode() {
+ l += bthrift.Binary.FieldBeginLength("strict_mode", thrift.BOOL, 3)
+ l += bthrift.Binary.BoolLength(p.StrictMode)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TPublishVersionRequest) field4Length() int {
+ l := 0
+ if p.IsSetBaseTabletIds() {
+ l += bthrift.Binary.FieldBeginLength("base_tablet_ids", thrift.SET, 4)
+ l += bthrift.Binary.SetBeginLength(thrift.I64, len(p.BaseTabletIds))
+
+ for i := 0; i < len(p.BaseTabletIds); i++ {
+ for j := i + 1; j < len(p.BaseTabletIds); j++ {
+ if func(tgt, src types.TTabletId) bool {
+ if tgt != src {
+ return false
+ }
+ return true
+ }(p.BaseTabletIds[i], p.BaseTabletIds[j]) {
+ panic(fmt.Errorf("%T error writing set field: slice is not unique", p.BaseTabletIds[i]))
+ }
+ }
+ }
+ var tmpV types.TTabletId
+ l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.BaseTabletIds)
+ l += bthrift.Binary.SetEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TVisibleVersionReq) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetPartitionVersion bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.MAP {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetPartitionVersion = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetPartitionVersion {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TVisibleVersionReq[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TVisibleVersionReq[fieldId]))
+}
+
+func (p *TVisibleVersionReq) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.PartitionVersion = make(map[types.TPartitionId]types.TVersion, size)
+ for i := 0; i < size; i++ {
+ var _key types.TPartitionId
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _key = v
+
+ }
+
+ var _val types.TVersion
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _val = v
+
+ }
+
+ p.PartitionVersion[_key] = _val
+ }
+ if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TVisibleVersionReq) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TVisibleVersionReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TVisibleVersionReq")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TVisibleVersionReq) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TVisibleVersionReq")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TVisibleVersionReq) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_version", thrift.MAP, 1)
+ mapBeginOffset := offset
+ offset += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, 0)
+ var length int
+ for k, v := range p.PartitionVersion {
+ length++
+
+ offset += bthrift.Binary.WriteI64(buf[offset:], k)
+
+ offset += bthrift.Binary.WriteI64(buf[offset:], v)
+
+ }
+ bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I64, thrift.I64, length)
+ offset += bthrift.Binary.WriteMapEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TVisibleVersionReq) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("partition_version", thrift.MAP, 1)
+ l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.I64, len(p.PartitionVersion))
+ var tmpK types.TPartitionId
+ var tmpV types.TVersion
+ l += (bthrift.Binary.I64Length(int64(tmpK)) + bthrift.Binary.I64Length(int64(tmpV))) * len(p.PartitionVersion)
+ l += bthrift.Binary.MapEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetPartitionId bool = false
+ var issetVersion bool = false
+ var issetTabletIds bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetPartitionId = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetVersion = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField3(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetTabletIds = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField4(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField5(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 6:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField6(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 7:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField7(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetPartitionId {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetVersion {
+ fieldId = 2
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetTabletIds {
+ fieldId = 3
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCalcDeleteBitmapPartitionInfo[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCalcDeleteBitmapPartitionInfo[fieldId]))
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.PartitionId = v
+
+ }
+ return offset, nil
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.Version = v
+
+ }
+ return offset, nil
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) FastReadField3(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.TabletIds = make([]types.TTabletId, 0, size)
+ for i := 0; i < size; i++ {
+ var _elem types.TTabletId
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _elem = v
+
+ }
+
+ p.TabletIds = append(p.TabletIds, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) FastReadField4(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.BaseCompactionCnts = make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+ var _elem int64
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _elem = v
+
+ }
+
+ p.BaseCompactionCnts = append(p.BaseCompactionCnts, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) FastReadField5(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.CumulativeCompactionCnts = make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+ var _elem int64
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _elem = v
+
+ }
+
+ p.CumulativeCompactionCnts = append(p.CumulativeCompactionCnts, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) FastReadField6(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.CumulativePoints = make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+ var _elem int64
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _elem = v
+
+ }
+
+ p.CumulativePoints = append(p.CumulativePoints, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) FastReadField7(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.SubTxnIds = make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+ var _elem int64
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _elem = v
+
+ }
+
+ p.SubTxnIds = append(p.SubTxnIds, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
-
- p.Overwrite = v
-
}
return offset, nil
}
// for compatibility
-func (p *TMoveDirReq) FastWrite(buf []byte) int {
+func (p *TCalcDeleteBitmapPartitionInfo) FastWrite(buf []byte) int {
return 0
}
-func (p *TMoveDirReq) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TCalcDeleteBitmapPartitionInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TMoveDirReq")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCalcDeleteBitmapPartitionInfo")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField3(buf[offset:], binaryWriter)
offset += p.fastWriteField4(buf[offset:], binaryWriter)
offset += p.fastWriteField5(buf[offset:], binaryWriter)
- offset += p.fastWriteField3(buf[offset:], binaryWriter)
+ offset += p.fastWriteField6(buf[offset:], binaryWriter)
+ offset += p.fastWriteField7(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
return offset
}
-func (p *TMoveDirReq) BLength() int {
+func (p *TCalcDeleteBitmapPartitionInfo) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("TMoveDirReq")
+ l += bthrift.Binary.StructBeginLength("TCalcDeleteBitmapPartitionInfo")
if p != nil {
l += p.field1Length()
l += p.field2Length()
l += p.field3Length()
l += p.field4Length()
l += p.field5Length()
+ l += p.field6Length()
+ l += p.field7Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
return l
}
-func (p *TMoveDirReq) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_id", thrift.I64, 1)
- offset += bthrift.Binary.WriteI64(buf[offset:], p.TabletId)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_id", thrift.I64, 1)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.PartitionId)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *TMoveDirReq) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "schema_hash", thrift.I32, 2)
- offset += bthrift.Binary.WriteI32(buf[offset:], p.SchemaHash)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.I64, 2)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.Version)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *TMoveDirReq) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "src", thrift.STRING, 3)
- offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Src)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_ids", thrift.LIST, 3)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.I64, 0)
+ var length int
+ for _, v := range p.TabletIds {
+ length++
+ offset += bthrift.Binary.WriteI64(buf[offset:], v)
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *TMoveDirReq) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_id", thrift.I64, 4)
- offset += bthrift.Binary.WriteI64(buf[offset:], p.JobId)
+ if p.IsSetBaseCompactionCnts() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "base_compaction_cnts", thrift.LIST, 4)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.I64, 0)
+ var length int
+ for _, v := range p.BaseCompactionCnts {
+ length++
+ offset += bthrift.Binary.WriteI64(buf[offset:], v)
- offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
return offset
}
-func (p *TMoveDirReq) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "overwrite", thrift.BOOL, 5)
- offset += bthrift.Binary.WriteBool(buf[offset:], p.Overwrite)
+ if p.IsSetCumulativeCompactionCnts() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cumulative_compaction_cnts", thrift.LIST, 5)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.I64, 0)
+ var length int
+ for _, v := range p.CumulativeCompactionCnts {
+ length++
+ offset += bthrift.Binary.WriteI64(buf[offset:], v)
- offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
return offset
}
-func (p *TMoveDirReq) field1Length() int {
+func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetCumulativePoints() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cumulative_points", thrift.LIST, 6)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.I64, 0)
+ var length int
+ for _, v := range p.CumulativePoints {
+ length++
+ offset += bthrift.Binary.WriteI64(buf[offset:], v)
+
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetSubTxnIds() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "sub_txn_ids", thrift.LIST, 7)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.I64, 0)
+ var length int
+ for _, v := range p.SubTxnIds {
+ length++
+ offset += bthrift.Binary.WriteI64(buf[offset:], v)
+
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TCalcDeleteBitmapPartitionInfo) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1)
- l += bthrift.Binary.I64Length(p.TabletId)
+ l += bthrift.Binary.FieldBeginLength("partition_id", thrift.I64, 1)
+ l += bthrift.Binary.I64Length(p.PartitionId)
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *TMoveDirReq) field2Length() int {
+func (p *TCalcDeleteBitmapPartitionInfo) field2Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("schema_hash", thrift.I32, 2)
- l += bthrift.Binary.I32Length(p.SchemaHash)
+ l += bthrift.Binary.FieldBeginLength("version", thrift.I64, 2)
+ l += bthrift.Binary.I64Length(p.Version)
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *TMoveDirReq) field3Length() int {
+func (p *TCalcDeleteBitmapPartitionInfo) field3Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("src", thrift.STRING, 3)
- l += bthrift.Binary.StringLengthNocopy(p.Src)
-
+ l += bthrift.Binary.FieldBeginLength("tablet_ids", thrift.LIST, 3)
+ l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TabletIds))
+ var tmpV types.TTabletId
+ l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.TabletIds)
+ l += bthrift.Binary.ListEndLength()
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *TMoveDirReq) field4Length() int {
+func (p *TCalcDeleteBitmapPartitionInfo) field4Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("job_id", thrift.I64, 4)
- l += bthrift.Binary.I64Length(p.JobId)
+ if p.IsSetBaseCompactionCnts() {
+ l += bthrift.Binary.FieldBeginLength("base_compaction_cnts", thrift.LIST, 4)
+ l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.BaseCompactionCnts))
+ var tmpV int64
+ l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.BaseCompactionCnts)
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
- l += bthrift.Binary.FieldEndLength()
+func (p *TCalcDeleteBitmapPartitionInfo) field5Length() int {
+ l := 0
+ if p.IsSetCumulativeCompactionCnts() {
+ l += bthrift.Binary.FieldBeginLength("cumulative_compaction_cnts", thrift.LIST, 5)
+ l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.CumulativeCompactionCnts))
+ var tmpV int64
+ l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.CumulativeCompactionCnts)
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
return l
}
-func (p *TMoveDirReq) field5Length() int {
+func (p *TCalcDeleteBitmapPartitionInfo) field6Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("overwrite", thrift.BOOL, 5)
- l += bthrift.Binary.BoolLength(p.Overwrite)
+ if p.IsSetCumulativePoints() {
+ l += bthrift.Binary.FieldBeginLength("cumulative_points", thrift.LIST, 6)
+ l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.CumulativePoints))
+ var tmpV int64
+ l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.CumulativePoints)
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
- l += bthrift.Binary.FieldEndLength()
+func (p *TCalcDeleteBitmapPartitionInfo) field7Length() int {
+ l := 0
+ if p.IsSetSubTxnIds() {
+ l += bthrift.Binary.FieldBeginLength("sub_txn_ids", thrift.LIST, 7)
+ l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.SubTxnIds))
+ var tmpV int64
+ l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.SubTxnIds)
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
return l
}
-func (p *TPublishVersionRequest) FastRead(buf []byte) (int, error) {
+func (p *TCalcDeleteBitmapRequest) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
var fieldTypeId thrift.TType
var fieldId int16
var issetTransactionId bool = false
- var issetPartitionVersionInfos bool = false
+ var issetPartitions bool = false
_, l, err = bthrift.Binary.ReadStructBegin(buf)
offset += l
if err != nil {
@@ -13415,21 +15897,7 @@ func (p *TPublishVersionRequest) FastRead(buf []byte) (int, error) {
if err != nil {
goto ReadFieldError
}
- issetPartitionVersionInfos = true
- } else {
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
- }
- case 3:
- if fieldTypeId == thrift.BOOL {
- l, err = p.FastReadField3(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldError
- }
+ issetPartitions = true
} else {
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -13462,7 +15930,7 @@ func (p *TPublishVersionRequest) FastRead(buf []byte) (int, error) {
goto RequiredFieldNotSetError
}
- if !issetPartitionVersionInfos {
+ if !issetPartitions {
fieldId = 2
goto RequiredFieldNotSetError
}
@@ -13472,7 +15940,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishVersionRequest[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCalcDeleteBitmapRequest[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -13480,10 +15948,10 @@ ReadFieldEndError:
ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
RequiredFieldNotSetError:
- return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishVersionRequest[fieldId]))
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCalcDeleteBitmapRequest[fieldId]))
}
-func (p *TPublishVersionRequest) FastReadField1(buf []byte) (int, error) {
+func (p *TCalcDeleteBitmapRequest) FastReadField1(buf []byte) (int, error) {
offset := 0
if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
@@ -13497,7 +15965,7 @@ func (p *TPublishVersionRequest) FastReadField1(buf []byte) (int, error) {
return offset, nil
}
-func (p *TPublishVersionRequest) FastReadField2(buf []byte) (int, error) {
+func (p *TCalcDeleteBitmapRequest) FastReadField2(buf []byte) (int, error) {
offset := 0
_, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
@@ -13505,16 +15973,16 @@ func (p *TPublishVersionRequest) FastReadField2(buf []byte) (int, error) {
if err != nil {
return offset, err
}
- p.PartitionVersionInfos = make([]*TPartitionVersionInfo, 0, size)
+ p.Partitions = make([]*TCalcDeleteBitmapPartitionInfo, 0, size)
for i := 0; i < size; i++ {
- _elem := NewTPartitionVersionInfo()
+ _elem := NewTCalcDeleteBitmapPartitionInfo()
if l, err := _elem.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
- p.PartitionVersionInfos = append(p.PartitionVersionInfos, _elem)
+ p.Partitions = append(p.Partitions, _elem)
}
if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
return offset, err
@@ -13524,31 +15992,16 @@ func (p *TPublishVersionRequest) FastReadField2(buf []byte) (int, error) {
return offset, nil
}
-func (p *TPublishVersionRequest) FastReadField3(buf []byte) (int, error) {
- offset := 0
-
- if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
- return offset, err
- } else {
- offset += l
-
- p.StrictMode = v
-
- }
- return offset, nil
-}
-
// for compatibility
-func (p *TPublishVersionRequest) FastWrite(buf []byte) int {
+func (p *TCalcDeleteBitmapRequest) FastWrite(buf []byte) int {
return 0
}
-func (p *TPublishVersionRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TCalcDeleteBitmapRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPublishVersionRequest")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCalcDeleteBitmapRequest")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
- offset += p.fastWriteField3(buf[offset:], binaryWriter)
offset += p.fastWriteField2(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
@@ -13556,20 +16009,19 @@ func (p *TPublishVersionRequest) FastWriteNocopy(buf []byte, binaryWriter bthrif
return offset
}
-func (p *TPublishVersionRequest) BLength() int {
+func (p *TCalcDeleteBitmapRequest) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("TPublishVersionRequest")
+ l += bthrift.Binary.StructBeginLength("TCalcDeleteBitmapRequest")
if p != nil {
l += p.field1Length()
l += p.field2Length()
- l += p.field3Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
return l
}
-func (p *TPublishVersionRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TCalcDeleteBitmapRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "transaction_id", thrift.I64, 1)
offset += bthrift.Binary.WriteI64(buf[offset:], p.TransactionId)
@@ -13578,13 +16030,13 @@ func (p *TPublishVersionRequest) fastWriteField1(buf []byte, binaryWriter bthrif
return offset
}
-func (p *TPublishVersionRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TCalcDeleteBitmapRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_version_infos", thrift.LIST, 2)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partitions", thrift.LIST, 2)
listBeginOffset := offset
offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
var length int
- for _, v := range p.PartitionVersionInfos {
+ for _, v := range p.Partitions {
length++
offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
}
@@ -13594,18 +16046,7 @@ func (p *TPublishVersionRequest) fastWriteField2(buf []byte, binaryWriter bthrif
return offset
}
-func (p *TPublishVersionRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
- offset := 0
- if p.IsSetStrictMode() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "strict_mode", thrift.BOOL, 3)
- offset += bthrift.Binary.WriteBool(buf[offset:], p.StrictMode)
-
- offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
- }
- return offset
-}
-
-func (p *TPublishVersionRequest) field1Length() int {
+func (p *TCalcDeleteBitmapRequest) field1Length() int {
l := 0
l += bthrift.Binary.FieldBeginLength("transaction_id", thrift.I64, 1)
l += bthrift.Binary.I64Length(p.TransactionId)
@@ -13614,11 +16055,11 @@ func (p *TPublishVersionRequest) field1Length() int {
return l
}
-func (p *TPublishVersionRequest) field2Length() int {
+func (p *TCalcDeleteBitmapRequest) field2Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("partition_version_infos", thrift.LIST, 2)
- l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.PartitionVersionInfos))
- for _, v := range p.PartitionVersionInfos {
+ l += bthrift.Binary.FieldBeginLength("partitions", thrift.LIST, 2)
+ l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Partitions))
+ for _, v := range p.Partitions {
l += v.BLength()
}
l += bthrift.Binary.ListEndLength()
@@ -13626,17 +16067,6 @@ func (p *TPublishVersionRequest) field2Length() int {
return l
}
-func (p *TPublishVersionRequest) field3Length() int {
- l := 0
- if p.IsSetStrictMode() {
- l += bthrift.Binary.FieldBeginLength("strict_mode", thrift.BOOL, 3)
- l += bthrift.Binary.BoolLength(p.StrictMode)
-
- l += bthrift.Binary.FieldEndLength()
- }
- return l
-}
-
func (p *TClearAlterTaskRequest) FastRead(buf []byte) (int, error) {
var err error
var offset int
@@ -14469,9 +16899,51 @@ func (p *TTabletMetaInfo) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
- case 11:
- if fieldTypeId == thrift.I64 {
- l, err = p.FastReadField11(buf[offset:])
+ case 11:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField11(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 12:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField12(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 13:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField13(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 14:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField14(buf[offset:])
offset += l
if err != nil {
goto ReadFieldError
@@ -14483,9 +16955,9 @@ func (p *TTabletMetaInfo) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
- case 12:
- if fieldTypeId == thrift.I64 {
- l, err = p.FastReadField12(buf[offset:])
+ case 15:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField15(buf[offset:])
offset += l
if err != nil {
goto ReadFieldError
@@ -14497,9 +16969,9 @@ func (p *TTabletMetaInfo) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
- case 13:
- if fieldTypeId == thrift.I64 {
- l, err = p.FastReadField13(buf[offset:])
+ case 16:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField16(buf[offset:])
offset += l
if err != nil {
goto ReadFieldError
@@ -14511,9 +16983,9 @@ func (p *TTabletMetaInfo) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
- case 14:
- if fieldTypeId == thrift.BOOL {
- l, err = p.FastReadField14(buf[offset:])
+ case 17:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField17(buf[offset:])
offset += l
if err != nil {
goto ReadFieldError
@@ -14525,9 +16997,9 @@ func (p *TTabletMetaInfo) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
- case 15:
- if fieldTypeId == thrift.BOOL {
- l, err = p.FastReadField15(buf[offset:])
+ case 18:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField18(buf[offset:])
offset += l
if err != nil {
goto ReadFieldError
@@ -14743,6 +17215,45 @@ func (p *TTabletMetaInfo) FastReadField15(buf []byte) (int, error) {
return offset, nil
}
+func (p *TTabletMetaInfo) FastReadField16(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.DisableAutoCompaction = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TTabletMetaInfo) FastReadField17(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.TimeSeriesCompactionEmptyRowsetsThreshold = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TTabletMetaInfo) FastReadField18(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.TimeSeriesCompactionLevelThreshold = &v
+
+ }
+ return offset, nil
+}
+
// for compatibility
func (p *TTabletMetaInfo) FastWrite(buf []byte) int {
return 0
@@ -14763,6 +17274,9 @@ func (p *TTabletMetaInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.Binar
offset += p.fastWriteField13(buf[offset:], binaryWriter)
offset += p.fastWriteField14(buf[offset:], binaryWriter)
offset += p.fastWriteField15(buf[offset:], binaryWriter)
+ offset += p.fastWriteField16(buf[offset:], binaryWriter)
+ offset += p.fastWriteField17(buf[offset:], binaryWriter)
+ offset += p.fastWriteField18(buf[offset:], binaryWriter)
offset += p.fastWriteField9(buf[offset:], binaryWriter)
offset += p.fastWriteField10(buf[offset:], binaryWriter)
}
@@ -14788,6 +17302,9 @@ func (p *TTabletMetaInfo) BLength() int {
l += p.field13Length()
l += p.field14Length()
l += p.field15Length()
+ l += p.field16Length()
+ l += p.field17Length()
+ l += p.field18Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
@@ -14936,6 +17453,39 @@ func (p *TTabletMetaInfo) fastWriteField15(buf []byte, binaryWriter bthrift.Bina
return offset
}
+func (p *TTabletMetaInfo) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetDisableAutoCompaction() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "disable_auto_compaction", thrift.BOOL, 16)
+ offset += bthrift.Binary.WriteBool(buf[offset:], *p.DisableAutoCompaction)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TTabletMetaInfo) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "time_series_compaction_empty_rowsets_threshold", thrift.I64, 17)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.TimeSeriesCompactionEmptyRowsetsThreshold)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TTabletMetaInfo) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetTimeSeriesCompactionLevelThreshold() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "time_series_compaction_level_threshold", thrift.I64, 18)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.TimeSeriesCompactionLevelThreshold)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
func (p *TTabletMetaInfo) field1Length() int {
l := 0
if p.IsSetTabletId() {
@@ -15078,6 +17628,39 @@ func (p *TTabletMetaInfo) field15Length() int {
return l
}
+func (p *TTabletMetaInfo) field16Length() int {
+ l := 0
+ if p.IsSetDisableAutoCompaction() {
+ l += bthrift.Binary.FieldBeginLength("disable_auto_compaction", thrift.BOOL, 16)
+ l += bthrift.Binary.BoolLength(*p.DisableAutoCompaction)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TTabletMetaInfo) field17Length() int {
+ l := 0
+ if p.IsSetTimeSeriesCompactionEmptyRowsetsThreshold() {
+ l += bthrift.Binary.FieldBeginLength("time_series_compaction_empty_rowsets_threshold", thrift.I64, 17)
+ l += bthrift.Binary.I64Length(*p.TimeSeriesCompactionEmptyRowsetsThreshold)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TTabletMetaInfo) field18Length() int {
+ l := 0
+ if p.IsSetTimeSeriesCompactionLevelThreshold() {
+ l += bthrift.Binary.FieldBeginLength("time_series_compaction_level_threshold", thrift.I64, 18)
+ l += bthrift.Binary.I64Length(*p.TimeSeriesCompactionLevelThreshold)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
func (p *TUpdateTabletMetaInfoReq) FastRead(buf []byte) (int, error) {
var err error
var offset int
@@ -16407,6 +18990,62 @@ func (p *TAgentTaskRequest) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
+ case 34:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField34(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 35:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField35(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 36:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField36(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 1000:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1000(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
default:
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -16879,6 +19518,58 @@ func (p *TAgentTaskRequest) FastReadField33(buf []byte) (int, error) {
return offset, nil
}
+func (p *TAgentTaskRequest) FastReadField34(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := NewTCleanTrashReq()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.CleanTrashReq = tmp
+ return offset, nil
+}
+
+func (p *TAgentTaskRequest) FastReadField35(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := NewTVisibleVersionReq()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.VisibleVersionReq = tmp
+ return offset, nil
+}
+
+func (p *TAgentTaskRequest) FastReadField36(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := NewTCleanUDFCacheReq()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.CleanUdfCacheReq = tmp
+ return offset, nil
+}
+
+func (p *TAgentTaskRequest) FastReadField1000(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := NewTCalcDeleteBitmapRequest()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.CalcDeleteBitmapReq = tmp
+ return offset, nil
+}
+
// for compatibility
func (p *TAgentTaskRequest) FastWrite(buf []byte) int {
return 0
@@ -16920,6 +19611,10 @@ func (p *TAgentTaskRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bin
offset += p.fastWriteField31(buf[offset:], binaryWriter)
offset += p.fastWriteField32(buf[offset:], binaryWriter)
offset += p.fastWriteField33(buf[offset:], binaryWriter)
+ offset += p.fastWriteField34(buf[offset:], binaryWriter)
+ offset += p.fastWriteField35(buf[offset:], binaryWriter)
+ offset += p.fastWriteField36(buf[offset:], binaryWriter)
+ offset += p.fastWriteField1000(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
@@ -16962,6 +19657,10 @@ func (p *TAgentTaskRequest) BLength() int {
l += p.field31Length()
l += p.field32Length()
l += p.field33Length()
+ l += p.field34Length()
+ l += p.field35Length()
+ l += p.field36Length()
+ l += p.field1000Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
@@ -17287,6 +19986,46 @@ func (p *TAgentTaskRequest) fastWriteField33(buf []byte, binaryWriter bthrift.Bi
return offset
}
+func (p *TAgentTaskRequest) fastWriteField34(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetCleanTrashReq() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "clean_trash_req", thrift.STRUCT, 34)
+ offset += p.CleanTrashReq.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TAgentTaskRequest) fastWriteField35(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetVisibleVersionReq() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "visible_version_req", thrift.STRUCT, 35)
+ offset += p.VisibleVersionReq.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TAgentTaskRequest) fastWriteField36(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetCleanUdfCacheReq() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "clean_udf_cache_req", thrift.STRUCT, 36)
+ offset += p.CleanUdfCacheReq.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TAgentTaskRequest) fastWriteField1000(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetCalcDeleteBitmapReq() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "calc_delete_bitmap_req", thrift.STRUCT, 1000)
+ offset += p.CalcDeleteBitmapReq.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
func (p *TAgentTaskRequest) field1Length() int {
l := 0
l += bthrift.Binary.FieldBeginLength("protocol_version", thrift.I32, 1)
@@ -17606,6 +20345,46 @@ func (p *TAgentTaskRequest) field33Length() int {
return l
}
+func (p *TAgentTaskRequest) field34Length() int {
+ l := 0
+ if p.IsSetCleanTrashReq() {
+ l += bthrift.Binary.FieldBeginLength("clean_trash_req", thrift.STRUCT, 34)
+ l += p.CleanTrashReq.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TAgentTaskRequest) field35Length() int {
+ l := 0
+ if p.IsSetVisibleVersionReq() {
+ l += bthrift.Binary.FieldBeginLength("visible_version_req", thrift.STRUCT, 35)
+ l += p.VisibleVersionReq.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TAgentTaskRequest) field36Length() int {
+ l := 0
+ if p.IsSetCleanUdfCacheReq() {
+ l += bthrift.Binary.FieldBeginLength("clean_udf_cache_req", thrift.STRUCT, 36)
+ l += p.CleanUdfCacheReq.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TAgentTaskRequest) field1000Length() int {
+ l := 0
+ if p.IsSetCalcDeleteBitmapReq() {
+ l += bthrift.Binary.FieldBeginLength("calc_delete_bitmap_req", thrift.STRUCT, 1000)
+ l += p.CalcDeleteBitmapReq.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
func (p *TAgentResult_) FastRead(buf []byte) (int, error) {
var err error
var offset int
diff --git a/pkg/rpc/kitex_gen/backendservice/BackendService.go b/pkg/rpc/kitex_gen/backendservice/BackendService.go
index b4657654..3b1477c5 100644
--- a/pkg/rpc/kitex_gen/backendservice/BackendService.go
+++ b/pkg/rpc/kitex_gen/backendservice/BackendService.go
@@ -1,13 +1,16 @@
-// Code generated by thriftgo (0.2.7). DO NOT EDIT.
+// Code generated by thriftgo (0.3.13). DO NOT EDIT.
package backendservice
import (
"context"
+ "database/sql"
+ "database/sql/driver"
"fmt"
"github.com/apache/thrift/lib/go/thrift"
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/agentservice"
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/dorisexternalservice"
+ "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/palointernalservice"
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/plannodes"
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status"
@@ -15,6 +18,397 @@ import (
"strings"
)
+type TDownloadType int64
+
+const (
+ TDownloadType_BE TDownloadType = 0
+ TDownloadType_S3 TDownloadType = 1
+)
+
+func (p TDownloadType) String() string {
+ switch p {
+ case TDownloadType_BE:
+ return "BE"
+ case TDownloadType_S3:
+ return "S3"
+ }
+ return ""
+}
+
+func TDownloadTypeFromString(s string) (TDownloadType, error) {
+ switch s {
+ case "BE":
+ return TDownloadType_BE, nil
+ case "S3":
+ return TDownloadType_S3, nil
+ }
+ return TDownloadType(0), fmt.Errorf("not a valid TDownloadType string")
+}
+
+func TDownloadTypePtr(v TDownloadType) *TDownloadType { return &v }
+func (p *TDownloadType) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = TDownloadType(result.Int64)
+ return
+}
+
+func (p *TDownloadType) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
+type TWarmUpTabletsRequestType int64
+
+const (
+ TWarmUpTabletsRequestType_SET_JOB TWarmUpTabletsRequestType = 0
+ TWarmUpTabletsRequestType_SET_BATCH TWarmUpTabletsRequestType = 1
+ TWarmUpTabletsRequestType_GET_CURRENT_JOB_STATE_AND_LEASE TWarmUpTabletsRequestType = 2
+ TWarmUpTabletsRequestType_CLEAR_JOB TWarmUpTabletsRequestType = 3
+)
+
+func (p TWarmUpTabletsRequestType) String() string {
+ switch p {
+ case TWarmUpTabletsRequestType_SET_JOB:
+ return "SET_JOB"
+ case TWarmUpTabletsRequestType_SET_BATCH:
+ return "SET_BATCH"
+ case TWarmUpTabletsRequestType_GET_CURRENT_JOB_STATE_AND_LEASE:
+ return "GET_CURRENT_JOB_STATE_AND_LEASE"
+ case TWarmUpTabletsRequestType_CLEAR_JOB:
+ return "CLEAR_JOB"
+ }
+ return ""
+}
+
+func TWarmUpTabletsRequestTypeFromString(s string) (TWarmUpTabletsRequestType, error) {
+ switch s {
+ case "SET_JOB":
+ return TWarmUpTabletsRequestType_SET_JOB, nil
+ case "SET_BATCH":
+ return TWarmUpTabletsRequestType_SET_BATCH, nil
+ case "GET_CURRENT_JOB_STATE_AND_LEASE":
+ return TWarmUpTabletsRequestType_GET_CURRENT_JOB_STATE_AND_LEASE, nil
+ case "CLEAR_JOB":
+ return TWarmUpTabletsRequestType_CLEAR_JOB, nil
+ }
+ return TWarmUpTabletsRequestType(0), fmt.Errorf("not a valid TWarmUpTabletsRequestType string")
+}
+
+func TWarmUpTabletsRequestTypePtr(v TWarmUpTabletsRequestType) *TWarmUpTabletsRequestType { return &v }
+func (p *TWarmUpTabletsRequestType) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = TWarmUpTabletsRequestType(result.Int64)
+ return
+}
+
+func (p *TWarmUpTabletsRequestType) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
+type TIngestBinlogStatus int64
+
+const (
+ TIngestBinlogStatus_ANALYSIS_ERROR TIngestBinlogStatus = 0
+ TIngestBinlogStatus_UNKNOWN TIngestBinlogStatus = 1
+ TIngestBinlogStatus_NOT_FOUND TIngestBinlogStatus = 2
+ TIngestBinlogStatus_OK TIngestBinlogStatus = 3
+ TIngestBinlogStatus_FAILED TIngestBinlogStatus = 4
+ TIngestBinlogStatus_DOING TIngestBinlogStatus = 5
+)
+
+func (p TIngestBinlogStatus) String() string {
+ switch p {
+ case TIngestBinlogStatus_ANALYSIS_ERROR:
+ return "ANALYSIS_ERROR"
+ case TIngestBinlogStatus_UNKNOWN:
+ return "UNKNOWN"
+ case TIngestBinlogStatus_NOT_FOUND:
+ return "NOT_FOUND"
+ case TIngestBinlogStatus_OK:
+ return "OK"
+ case TIngestBinlogStatus_FAILED:
+ return "FAILED"
+ case TIngestBinlogStatus_DOING:
+ return "DOING"
+ }
+ return ""
+}
+
+func TIngestBinlogStatusFromString(s string) (TIngestBinlogStatus, error) {
+ switch s {
+ case "ANALYSIS_ERROR":
+ return TIngestBinlogStatus_ANALYSIS_ERROR, nil
+ case "UNKNOWN":
+ return TIngestBinlogStatus_UNKNOWN, nil
+ case "NOT_FOUND":
+ return TIngestBinlogStatus_NOT_FOUND, nil
+ case "OK":
+ return TIngestBinlogStatus_OK, nil
+ case "FAILED":
+ return TIngestBinlogStatus_FAILED, nil
+ case "DOING":
+ return TIngestBinlogStatus_DOING, nil
+ }
+ return TIngestBinlogStatus(0), fmt.Errorf("not a valid TIngestBinlogStatus string")
+}
+
+func TIngestBinlogStatusPtr(v TIngestBinlogStatus) *TIngestBinlogStatus { return &v }
+func (p *TIngestBinlogStatus) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = TIngestBinlogStatus(result.Int64)
+ return
+}
+
+func (p *TIngestBinlogStatus) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
+type TTopicInfoType int64
+
+const (
+ TTopicInfoType_WORKLOAD_GROUP TTopicInfoType = 0
+ TTopicInfoType_MOVE_QUERY_TO_GROUP TTopicInfoType = 1
+ TTopicInfoType_WORKLOAD_SCHED_POLICY TTopicInfoType = 2
+)
+
+func (p TTopicInfoType) String() string {
+ switch p {
+ case TTopicInfoType_WORKLOAD_GROUP:
+ return "WORKLOAD_GROUP"
+ case TTopicInfoType_MOVE_QUERY_TO_GROUP:
+ return "MOVE_QUERY_TO_GROUP"
+ case TTopicInfoType_WORKLOAD_SCHED_POLICY:
+ return "WORKLOAD_SCHED_POLICY"
+ }
+ return ""
+}
+
+func TTopicInfoTypeFromString(s string) (TTopicInfoType, error) {
+ switch s {
+ case "WORKLOAD_GROUP":
+ return TTopicInfoType_WORKLOAD_GROUP, nil
+ case "MOVE_QUERY_TO_GROUP":
+ return TTopicInfoType_MOVE_QUERY_TO_GROUP, nil
+ case "WORKLOAD_SCHED_POLICY":
+ return TTopicInfoType_WORKLOAD_SCHED_POLICY, nil
+ }
+ return TTopicInfoType(0), fmt.Errorf("not a valid TTopicInfoType string")
+}
+
+func TTopicInfoTypePtr(v TTopicInfoType) *TTopicInfoType { return &v }
+func (p *TTopicInfoType) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = TTopicInfoType(result.Int64)
+ return
+}
+
+func (p *TTopicInfoType) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
+type TWorkloadMetricType int64
+
+const (
+ TWorkloadMetricType_QUERY_TIME TWorkloadMetricType = 0
+ TWorkloadMetricType_BE_SCAN_ROWS TWorkloadMetricType = 1
+ TWorkloadMetricType_BE_SCAN_BYTES TWorkloadMetricType = 2
+ TWorkloadMetricType_QUERY_BE_MEMORY_BYTES TWorkloadMetricType = 3
+)
+
+func (p TWorkloadMetricType) String() string {
+ switch p {
+ case TWorkloadMetricType_QUERY_TIME:
+ return "QUERY_TIME"
+ case TWorkloadMetricType_BE_SCAN_ROWS:
+ return "BE_SCAN_ROWS"
+ case TWorkloadMetricType_BE_SCAN_BYTES:
+ return "BE_SCAN_BYTES"
+ case TWorkloadMetricType_QUERY_BE_MEMORY_BYTES:
+ return "QUERY_BE_MEMORY_BYTES"
+ }
+ return ""
+}
+
+func TWorkloadMetricTypeFromString(s string) (TWorkloadMetricType, error) {
+ switch s {
+ case "QUERY_TIME":
+ return TWorkloadMetricType_QUERY_TIME, nil
+ case "BE_SCAN_ROWS":
+ return TWorkloadMetricType_BE_SCAN_ROWS, nil
+ case "BE_SCAN_BYTES":
+ return TWorkloadMetricType_BE_SCAN_BYTES, nil
+ case "QUERY_BE_MEMORY_BYTES":
+ return TWorkloadMetricType_QUERY_BE_MEMORY_BYTES, nil
+ }
+ return TWorkloadMetricType(0), fmt.Errorf("not a valid TWorkloadMetricType string")
+}
+
+func TWorkloadMetricTypePtr(v TWorkloadMetricType) *TWorkloadMetricType { return &v }
+func (p *TWorkloadMetricType) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = TWorkloadMetricType(result.Int64)
+ return
+}
+
+func (p *TWorkloadMetricType) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
+type TCompareOperator int64
+
+const (
+ TCompareOperator_EQUAL TCompareOperator = 0
+ TCompareOperator_GREATER TCompareOperator = 1
+ TCompareOperator_GREATER_EQUAL TCompareOperator = 2
+ TCompareOperator_LESS TCompareOperator = 3
+ TCompareOperator_LESS_EQUAL TCompareOperator = 4
+)
+
+func (p TCompareOperator) String() string {
+ switch p {
+ case TCompareOperator_EQUAL:
+ return "EQUAL"
+ case TCompareOperator_GREATER:
+ return "GREATER"
+ case TCompareOperator_GREATER_EQUAL:
+ return "GREATER_EQUAL"
+ case TCompareOperator_LESS:
+ return "LESS"
+ case TCompareOperator_LESS_EQUAL:
+ return "LESS_EQUAL"
+ }
+ return ""
+}
+
+func TCompareOperatorFromString(s string) (TCompareOperator, error) {
+ switch s {
+ case "EQUAL":
+ return TCompareOperator_EQUAL, nil
+ case "GREATER":
+ return TCompareOperator_GREATER, nil
+ case "GREATER_EQUAL":
+ return TCompareOperator_GREATER_EQUAL, nil
+ case "LESS":
+ return TCompareOperator_LESS, nil
+ case "LESS_EQUAL":
+ return TCompareOperator_LESS_EQUAL, nil
+ }
+ return TCompareOperator(0), fmt.Errorf("not a valid TCompareOperator string")
+}
+
+func TCompareOperatorPtr(v TCompareOperator) *TCompareOperator { return &v }
+func (p *TCompareOperator) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = TCompareOperator(result.Int64)
+ return
+}
+
+func (p *TCompareOperator) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
+type TWorkloadActionType int64
+
+const (
+ TWorkloadActionType_MOVE_QUERY_TO_GROUP TWorkloadActionType = 0
+ TWorkloadActionType_CANCEL_QUERY TWorkloadActionType = 1
+)
+
+func (p TWorkloadActionType) String() string {
+ switch p {
+ case TWorkloadActionType_MOVE_QUERY_TO_GROUP:
+ return "MOVE_QUERY_TO_GROUP"
+ case TWorkloadActionType_CANCEL_QUERY:
+ return "CANCEL_QUERY"
+ }
+ return ""
+}
+
+func TWorkloadActionTypeFromString(s string) (TWorkloadActionType, error) {
+ switch s {
+ case "MOVE_QUERY_TO_GROUP":
+ return TWorkloadActionType_MOVE_QUERY_TO_GROUP, nil
+ case "CANCEL_QUERY":
+ return TWorkloadActionType_CANCEL_QUERY, nil
+ }
+ return TWorkloadActionType(0), fmt.Errorf("not a valid TWorkloadActionType string")
+}
+
+func TWorkloadActionTypePtr(v TWorkloadActionType) *TWorkloadActionType { return &v }
+func (p *TWorkloadActionType) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = TWorkloadActionType(result.Int64)
+ return
+}
+
+func (p *TWorkloadActionType) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
+type TWorkloadType int64
+
+const (
+ TWorkloadType_INTERNAL TWorkloadType = 2
+)
+
+func (p TWorkloadType) String() string {
+ switch p {
+ case TWorkloadType_INTERNAL:
+ return "INTERNAL"
+ }
+ return ""
+}
+
+func TWorkloadTypeFromString(s string) (TWorkloadType, error) {
+ switch s {
+ case "INTERNAL":
+ return TWorkloadType_INTERNAL, nil
+ }
+ return TWorkloadType(0), fmt.Errorf("not a valid TWorkloadType string")
+}
+
+func TWorkloadTypePtr(v TWorkloadType) *TWorkloadType { return &v }
+func (p *TWorkloadType) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = TWorkloadType(result.Int64)
+ return
+}
+
+func (p *TWorkloadType) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
type TExportTaskRequest struct {
Params *palointernalservice.TExecPlanFragmentParams `thrift:"params,1,required" frugal:"1,required,palointernalservice.TExecPlanFragmentParams" json:"params"`
}
@@ -24,7 +418,6 @@ func NewTExportTaskRequest() *TExportTaskRequest {
}
func (p *TExportTaskRequest) InitDefault() {
- *p = TExportTaskRequest{}
}
var TExportTaskRequest_Params_DEFAULT *palointernalservice.TExecPlanFragmentParams
@@ -73,17 +466,14 @@ func (p *TExportTaskRequest) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetParams = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -115,10 +505,11 @@ RequiredFieldNotSetError:
}
func (p *TExportTaskRequest) ReadField1(iprot thrift.TProtocol) error {
- p.Params = palointernalservice.NewTExecPlanFragmentParams()
- if err := p.Params.Read(iprot); err != nil {
+ _field := palointernalservice.NewTExecPlanFragmentParams()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Params = _field
return nil
}
@@ -132,7 +523,6 @@ func (p *TExportTaskRequest) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -173,6 +563,7 @@ func (p *TExportTaskRequest) String() string {
return ""
}
return fmt.Sprintf("TExportTaskRequest(%+v)", *p)
+
}
func (p *TExportTaskRequest) DeepEqual(ano *TExportTaskRequest) bool {
@@ -196,11 +587,13 @@ func (p *TExportTaskRequest) Field1DeepEqual(src *palointernalservice.TExecPlanF
}
type TTabletStat struct {
- TabletId int64 `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"`
- DataSize *int64 `thrift:"data_size,2,optional" frugal:"2,optional,i64" json:"data_size,omitempty"`
- RowNum *int64 `thrift:"row_num,3,optional" frugal:"3,optional,i64" json:"row_num,omitempty"`
- VersionCount *int64 `thrift:"version_count,4,optional" frugal:"4,optional,i64" json:"version_count,omitempty"`
- RemoteDataSize *int64 `thrift:"remote_data_size,5,optional" frugal:"5,optional,i64" json:"remote_data_size,omitempty"`
+ TabletId int64 `thrift:"tablet_id,1,required" frugal:"1,required,i64" json:"tablet_id"`
+ DataSize *int64 `thrift:"data_size,2,optional" frugal:"2,optional,i64" json:"data_size,omitempty"`
+ RowCount *int64 `thrift:"row_count,3,optional" frugal:"3,optional,i64" json:"row_count,omitempty"`
+ TotalVersionCount *int64 `thrift:"total_version_count,4,optional" frugal:"4,optional,i64" json:"total_version_count,omitempty"`
+ RemoteDataSize *int64 `thrift:"remote_data_size,5,optional" frugal:"5,optional,i64" json:"remote_data_size,omitempty"`
+ VisibleVersionCount *int64 `thrift:"visible_version_count,6,optional" frugal:"6,optional,i64" json:"visible_version_count,omitempty"`
+ VisibleVersion *int64 `thrift:"visible_version,7,optional" frugal:"7,optional,i64" json:"visible_version,omitempty"`
}
func NewTTabletStat() *TTabletStat {
@@ -208,7 +601,6 @@ func NewTTabletStat() *TTabletStat {
}
func (p *TTabletStat) InitDefault() {
- *p = TTabletStat{}
}
func (p *TTabletStat) GetTabletId() (v int64) {
@@ -224,22 +616,22 @@ func (p *TTabletStat) GetDataSize() (v int64) {
return *p.DataSize
}
-var TTabletStat_RowNum_DEFAULT int64
+var TTabletStat_RowCount_DEFAULT int64
-func (p *TTabletStat) GetRowNum() (v int64) {
- if !p.IsSetRowNum() {
- return TTabletStat_RowNum_DEFAULT
+func (p *TTabletStat) GetRowCount() (v int64) {
+ if !p.IsSetRowCount() {
+ return TTabletStat_RowCount_DEFAULT
}
- return *p.RowNum
+ return *p.RowCount
}
-var TTabletStat_VersionCount_DEFAULT int64
+var TTabletStat_TotalVersionCount_DEFAULT int64
-func (p *TTabletStat) GetVersionCount() (v int64) {
- if !p.IsSetVersionCount() {
- return TTabletStat_VersionCount_DEFAULT
+func (p *TTabletStat) GetTotalVersionCount() (v int64) {
+ if !p.IsSetTotalVersionCount() {
+ return TTabletStat_TotalVersionCount_DEFAULT
}
- return *p.VersionCount
+ return *p.TotalVersionCount
}
var TTabletStat_RemoteDataSize_DEFAULT int64
@@ -250,46 +642,80 @@ func (p *TTabletStat) GetRemoteDataSize() (v int64) {
}
return *p.RemoteDataSize
}
+
+var TTabletStat_VisibleVersionCount_DEFAULT int64
+
+func (p *TTabletStat) GetVisibleVersionCount() (v int64) {
+ if !p.IsSetVisibleVersionCount() {
+ return TTabletStat_VisibleVersionCount_DEFAULT
+ }
+ return *p.VisibleVersionCount
+}
+
+var TTabletStat_VisibleVersion_DEFAULT int64
+
+func (p *TTabletStat) GetVisibleVersion() (v int64) {
+ if !p.IsSetVisibleVersion() {
+ return TTabletStat_VisibleVersion_DEFAULT
+ }
+ return *p.VisibleVersion
+}
func (p *TTabletStat) SetTabletId(val int64) {
p.TabletId = val
}
func (p *TTabletStat) SetDataSize(val *int64) {
p.DataSize = val
}
-func (p *TTabletStat) SetRowNum(val *int64) {
- p.RowNum = val
+func (p *TTabletStat) SetRowCount(val *int64) {
+ p.RowCount = val
}
-func (p *TTabletStat) SetVersionCount(val *int64) {
- p.VersionCount = val
+func (p *TTabletStat) SetTotalVersionCount(val *int64) {
+ p.TotalVersionCount = val
}
func (p *TTabletStat) SetRemoteDataSize(val *int64) {
p.RemoteDataSize = val
}
+func (p *TTabletStat) SetVisibleVersionCount(val *int64) {
+ p.VisibleVersionCount = val
+}
+func (p *TTabletStat) SetVisibleVersion(val *int64) {
+ p.VisibleVersion = val
+}
var fieldIDToName_TTabletStat = map[int16]string{
1: "tablet_id",
2: "data_size",
- 3: "row_num",
- 4: "version_count",
+ 3: "row_count",
+ 4: "total_version_count",
5: "remote_data_size",
+ 6: "visible_version_count",
+ 7: "visible_version",
}
func (p *TTabletStat) IsSetDataSize() bool {
return p.DataSize != nil
}
-func (p *TTabletStat) IsSetRowNum() bool {
- return p.RowNum != nil
+func (p *TTabletStat) IsSetRowCount() bool {
+ return p.RowCount != nil
}
-func (p *TTabletStat) IsSetVersionCount() bool {
- return p.VersionCount != nil
+func (p *TTabletStat) IsSetTotalVersionCount() bool {
+ return p.TotalVersionCount != nil
}
func (p *TTabletStat) IsSetRemoteDataSize() bool {
return p.RemoteDataSize != nil
}
+func (p *TTabletStat) IsSetVisibleVersionCount() bool {
+ return p.VisibleVersionCount != nil
+}
+
+func (p *TTabletStat) IsSetVisibleVersion() bool {
+ return p.VisibleVersion != nil
+}
+
func (p *TTabletStat) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -316,57 +742,62 @@ func (p *TTabletStat) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I64 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I64 {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I64 {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 6:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField6(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 7:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField7(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -398,47 +829,80 @@ RequiredFieldNotSetError:
}
func (p *TTabletStat) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TabletId = v
+ _field = v
}
+ p.TabletId = _field
return nil
}
-
func (p *TTabletStat) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.DataSize = &v
+ _field = &v
}
+ p.DataSize = _field
return nil
}
-
func (p *TTabletStat) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.RowNum = &v
+ _field = &v
}
+ p.RowCount = _field
return nil
}
-
func (p *TTabletStat) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.VersionCount = &v
+ _field = &v
}
+ p.TotalVersionCount = _field
return nil
}
-
func (p *TTabletStat) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.RemoteDataSize = _field
+ return nil
+}
+func (p *TTabletStat) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.VisibleVersionCount = _field
+ return nil
+}
+func (p *TTabletStat) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.RemoteDataSize = &v
+ _field = &v
}
+ p.VisibleVersion = _field
return nil
}
@@ -468,7 +932,14 @@ func (p *TTabletStat) Write(oprot thrift.TProtocol) (err error) {
fieldId = 5
goto WriteFieldError
}
-
+ if err = p.writeField6(oprot); err != nil {
+ fieldId = 6
+ goto WriteFieldError
+ }
+ if err = p.writeField7(oprot); err != nil {
+ fieldId = 7
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -524,11 +995,11 @@ WriteFieldEndError:
}
func (p *TTabletStat) writeField3(oprot thrift.TProtocol) (err error) {
- if p.IsSetRowNum() {
- if err = oprot.WriteFieldBegin("row_num", thrift.I64, 3); err != nil {
+ if p.IsSetRowCount() {
+ if err = oprot.WriteFieldBegin("row_count", thrift.I64, 3); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteI64(*p.RowNum); err != nil {
+ if err := oprot.WriteI64(*p.RowCount); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -543,11 +1014,11 @@ WriteFieldEndError:
}
func (p *TTabletStat) writeField4(oprot thrift.TProtocol) (err error) {
- if p.IsSetVersionCount() {
- if err = oprot.WriteFieldBegin("version_count", thrift.I64, 4); err != nil {
+ if p.IsSetTotalVersionCount() {
+ if err = oprot.WriteFieldBegin("total_version_count", thrift.I64, 4); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteI64(*p.VersionCount); err != nil {
+ if err := oprot.WriteI64(*p.TotalVersionCount); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -580,11 +1051,50 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err)
}
+func (p *TTabletStat) writeField6(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVisibleVersionCount() {
+ if err = oprot.WriteFieldBegin("visible_version_count", thrift.I64, 6); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.VisibleVersionCount); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err)
+}
+
+func (p *TTabletStat) writeField7(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVisibleVersion() {
+ if err = oprot.WriteFieldBegin("visible_version", thrift.I64, 7); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.VisibleVersion); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err)
+}
+
func (p *TTabletStat) String() string {
if p == nil {
return ""
}
return fmt.Sprintf("TTabletStat(%+v)", *p)
+
}
func (p *TTabletStat) DeepEqual(ano *TTabletStat) bool {
@@ -599,15 +1109,21 @@ func (p *TTabletStat) DeepEqual(ano *TTabletStat) bool {
if !p.Field2DeepEqual(ano.DataSize) {
return false
}
- if !p.Field3DeepEqual(ano.RowNum) {
+ if !p.Field3DeepEqual(ano.RowCount) {
return false
}
- if !p.Field4DeepEqual(ano.VersionCount) {
+ if !p.Field4DeepEqual(ano.TotalVersionCount) {
return false
}
if !p.Field5DeepEqual(ano.RemoteDataSize) {
return false
}
+ if !p.Field6DeepEqual(ano.VisibleVersionCount) {
+ return false
+ }
+ if !p.Field7DeepEqual(ano.VisibleVersion) {
+ return false
+ }
return true
}
@@ -632,24 +1148,24 @@ func (p *TTabletStat) Field2DeepEqual(src *int64) bool {
}
func (p *TTabletStat) Field3DeepEqual(src *int64) bool {
- if p.RowNum == src {
+ if p.RowCount == src {
return true
- } else if p.RowNum == nil || src == nil {
+ } else if p.RowCount == nil || src == nil {
return false
}
- if *p.RowNum != *src {
+ if *p.RowCount != *src {
return false
}
return true
}
func (p *TTabletStat) Field4DeepEqual(src *int64) bool {
- if p.VersionCount == src {
+ if p.TotalVersionCount == src {
return true
- } else if p.VersionCount == nil || src == nil {
+ } else if p.TotalVersionCount == nil || src == nil {
return false
}
- if *p.VersionCount != *src {
+ if *p.TotalVersionCount != *src {
return false
}
return true
@@ -666,6 +1182,30 @@ func (p *TTabletStat) Field5DeepEqual(src *int64) bool {
}
return true
}
+func (p *TTabletStat) Field6DeepEqual(src *int64) bool {
+
+ if p.VisibleVersionCount == src {
+ return true
+ } else if p.VisibleVersionCount == nil || src == nil {
+ return false
+ }
+ if *p.VisibleVersionCount != *src {
+ return false
+ }
+ return true
+}
+func (p *TTabletStat) Field7DeepEqual(src *int64) bool {
+
+ if p.VisibleVersion == src {
+ return true
+ } else if p.VisibleVersion == nil || src == nil {
+ return false
+ }
+ if *p.VisibleVersion != *src {
+ return false
+ }
+ return true
+}
type TTabletStatResult_ struct {
TabletsStats map[int64]*TTabletStat `thrift:"tablets_stats,1,required" frugal:"1,required,map" json:"tablets_stats"`
@@ -677,7 +1217,6 @@ func NewTTabletStatResult_() *TTabletStatResult_ {
}
func (p *TTabletStatResult_) InitDefault() {
- *p = TTabletStatResult_{}
}
func (p *TTabletStatResult_) GetTabletsStats() (v map[int64]*TTabletStat) {
@@ -734,27 +1273,22 @@ func (p *TTabletStatResult_) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTabletsStats = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.LIST {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -790,7 +1324,8 @@ func (p *TTabletStatResult_) ReadField1(iprot thrift.TProtocol) error {
if err != nil {
return err
}
- p.TabletsStats = make(map[int64]*TTabletStat, size)
+ _field := make(map[int64]*TTabletStat, size)
+ values := make([]TTabletStat, size)
for i := 0; i < size; i++ {
var _key int64
if v, err := iprot.ReadI64(); err != nil {
@@ -798,36 +1333,42 @@ func (p *TTabletStatResult_) ReadField1(iprot thrift.TProtocol) error {
} else {
_key = v
}
- _val := NewTTabletStat()
+
+ _val := &values[i]
+ _val.InitDefault()
if err := _val.Read(iprot); err != nil {
return err
}
- p.TabletsStats[_key] = _val
+ _field[_key] = _val
}
if err := iprot.ReadMapEnd(); err != nil {
return err
}
+ p.TabletsStats = _field
return nil
}
-
func (p *TTabletStatResult_) ReadField2(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.TabletStatList = make([]*TTabletStat, 0, size)
+ _field := make([]*TTabletStat, 0, size)
+ values := make([]TTabletStat, size)
for i := 0; i < size; i++ {
- _elem := NewTTabletStat()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.TabletStatList = append(p.TabletStatList, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.TabletStatList = _field
return nil
}
@@ -845,7 +1386,6 @@ func (p *TTabletStatResult_) Write(oprot thrift.TProtocol) (err error) {
fieldId = 2
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -872,11 +1412,9 @@ func (p *TTabletStatResult_) writeField1(oprot thrift.TProtocol) (err error) {
return err
}
for k, v := range p.TabletsStats {
-
if err := oprot.WriteI64(k); err != nil {
return err
}
-
if err := v.Write(oprot); err != nil {
return err
}
@@ -926,6 +1464,7 @@ func (p *TTabletStatResult_) String() string {
return ""
}
return fmt.Sprintf("TTabletStatResult_(%+v)", *p)
+
}
func (p *TTabletStatResult_) DeepEqual(ano *TTabletStatResult_) bool {
@@ -982,7 +1521,6 @@ func NewTKafkaLoadInfo() *TKafkaLoadInfo {
}
func (p *TKafkaLoadInfo) InitDefault() {
- *p = TKafkaLoadInfo{}
}
func (p *TKafkaLoadInfo) GetBrokers() (v string) {
@@ -1057,10 +1595,8 @@ func (p *TKafkaLoadInfo) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetBrokers = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRING {
@@ -1068,10 +1604,8 @@ func (p *TKafkaLoadInfo) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTopic = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.MAP {
@@ -1079,27 +1613,22 @@ func (p *TKafkaLoadInfo) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetPartitionBeginOffset = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.MAP {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -1141,29 +1670,33 @@ RequiredFieldNotSetError:
}
func (p *TKafkaLoadInfo) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Brokers = v
+ _field = v
}
+ p.Brokers = _field
return nil
}
-
func (p *TKafkaLoadInfo) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Topic = v
+ _field = v
}
+ p.Topic = _field
return nil
}
-
func (p *TKafkaLoadInfo) ReadField3(iprot thrift.TProtocol) error {
_, _, size, err := iprot.ReadMapBegin()
if err != nil {
return err
}
- p.PartitionBeginOffset = make(map[int32]int64, size)
+ _field := make(map[int32]int64, size)
for i := 0; i < size; i++ {
var _key int32
if v, err := iprot.ReadI32(); err != nil {
@@ -1179,20 +1712,20 @@ func (p *TKafkaLoadInfo) ReadField3(iprot thrift.TProtocol) error {
_val = v
}
- p.PartitionBeginOffset[_key] = _val
+ _field[_key] = _val
}
if err := iprot.ReadMapEnd(); err != nil {
return err
}
+ p.PartitionBeginOffset = _field
return nil
}
-
func (p *TKafkaLoadInfo) ReadField4(iprot thrift.TProtocol) error {
_, _, size, err := iprot.ReadMapBegin()
if err != nil {
return err
}
- p.Properties = make(map[string]string, size)
+ _field := make(map[string]string, size)
for i := 0; i < size; i++ {
var _key string
if v, err := iprot.ReadString(); err != nil {
@@ -1208,11 +1741,12 @@ func (p *TKafkaLoadInfo) ReadField4(iprot thrift.TProtocol) error {
_val = v
}
- p.Properties[_key] = _val
+ _field[_key] = _val
}
if err := iprot.ReadMapEnd(); err != nil {
return err
}
+ p.Properties = _field
return nil
}
@@ -1238,7 +1772,6 @@ func (p *TKafkaLoadInfo) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -1299,11 +1832,9 @@ func (p *TKafkaLoadInfo) writeField3(oprot thrift.TProtocol) (err error) {
return err
}
for k, v := range p.PartitionBeginOffset {
-
if err := oprot.WriteI32(k); err != nil {
return err
}
-
if err := oprot.WriteI64(v); err != nil {
return err
}
@@ -1330,11 +1861,9 @@ func (p *TKafkaLoadInfo) writeField4(oprot thrift.TProtocol) (err error) {
return err
}
for k, v := range p.Properties {
-
if err := oprot.WriteString(k); err != nil {
return err
}
-
if err := oprot.WriteString(v); err != nil {
return err
}
@@ -1358,6 +1887,7 @@ func (p *TKafkaLoadInfo) String() string {
return ""
}
return fmt.Sprintf("TKafkaLoadInfo(%+v)", *p)
+
}
func (p *TKafkaLoadInfo) DeepEqual(ano *TKafkaLoadInfo) bool {
@@ -1423,22 +1953,25 @@ func (p *TKafkaLoadInfo) Field4DeepEqual(src map[string]string) bool {
}
type TRoutineLoadTask struct {
- Type types.TLoadSourceType `thrift:"type,1,required" frugal:"1,required,TLoadSourceType" json:"type"`
- JobId int64 `thrift:"job_id,2,required" frugal:"2,required,i64" json:"job_id"`
- Id *types.TUniqueId `thrift:"id,3,required" frugal:"3,required,types.TUniqueId" json:"id"`
- TxnId int64 `thrift:"txn_id,4,required" frugal:"4,required,i64" json:"txn_id"`
- AuthCode int64 `thrift:"auth_code,5,required" frugal:"5,required,i64" json:"auth_code"`
- Db *string `thrift:"db,6,optional" frugal:"6,optional,string" json:"db,omitempty"`
- Tbl *string `thrift:"tbl,7,optional" frugal:"7,optional,string" json:"tbl,omitempty"`
- Label *string `thrift:"label,8,optional" frugal:"8,optional,string" json:"label,omitempty"`
- MaxIntervalS *int64 `thrift:"max_interval_s,9,optional" frugal:"9,optional,i64" json:"max_interval_s,omitempty"`
- MaxBatchRows *int64 `thrift:"max_batch_rows,10,optional" frugal:"10,optional,i64" json:"max_batch_rows,omitempty"`
- MaxBatchSize *int64 `thrift:"max_batch_size,11,optional" frugal:"11,optional,i64" json:"max_batch_size,omitempty"`
- KafkaLoadInfo *TKafkaLoadInfo `thrift:"kafka_load_info,12,optional" frugal:"12,optional,TKafkaLoadInfo" json:"kafka_load_info,omitempty"`
- Params *palointernalservice.TExecPlanFragmentParams `thrift:"params,13,optional" frugal:"13,optional,palointernalservice.TExecPlanFragmentParams" json:"params,omitempty"`
- Format *plannodes.TFileFormatType `thrift:"format,14,optional" frugal:"14,optional,TFileFormatType" json:"format,omitempty"`
- PipelineParams *palointernalservice.TPipelineFragmentParams `thrift:"pipeline_params,15,optional" frugal:"15,optional,palointernalservice.TPipelineFragmentParams" json:"pipeline_params,omitempty"`
- IsMultiTable *bool `thrift:"is_multi_table,16,optional" frugal:"16,optional,bool" json:"is_multi_table,omitempty"`
+ Type types.TLoadSourceType `thrift:"type,1,required" frugal:"1,required,TLoadSourceType" json:"type"`
+ JobId int64 `thrift:"job_id,2,required" frugal:"2,required,i64" json:"job_id"`
+ Id *types.TUniqueId `thrift:"id,3,required" frugal:"3,required,types.TUniqueId" json:"id"`
+ TxnId int64 `thrift:"txn_id,4,required" frugal:"4,required,i64" json:"txn_id"`
+ AuthCode int64 `thrift:"auth_code,5,required" frugal:"5,required,i64" json:"auth_code"`
+ Db *string `thrift:"db,6,optional" frugal:"6,optional,string" json:"db,omitempty"`
+ Tbl *string `thrift:"tbl,7,optional" frugal:"7,optional,string" json:"tbl,omitempty"`
+ Label *string `thrift:"label,8,optional" frugal:"8,optional,string" json:"label,omitempty"`
+ MaxIntervalS *int64 `thrift:"max_interval_s,9,optional" frugal:"9,optional,i64" json:"max_interval_s,omitempty"`
+ MaxBatchRows *int64 `thrift:"max_batch_rows,10,optional" frugal:"10,optional,i64" json:"max_batch_rows,omitempty"`
+ MaxBatchSize *int64 `thrift:"max_batch_size,11,optional" frugal:"11,optional,i64" json:"max_batch_size,omitempty"`
+ KafkaLoadInfo *TKafkaLoadInfo `thrift:"kafka_load_info,12,optional" frugal:"12,optional,TKafkaLoadInfo" json:"kafka_load_info,omitempty"`
+ Params *palointernalservice.TExecPlanFragmentParams `thrift:"params,13,optional" frugal:"13,optional,palointernalservice.TExecPlanFragmentParams" json:"params,omitempty"`
+ Format *plannodes.TFileFormatType `thrift:"format,14,optional" frugal:"14,optional,TFileFormatType" json:"format,omitempty"`
+ PipelineParams *palointernalservice.TPipelineFragmentParams `thrift:"pipeline_params,15,optional" frugal:"15,optional,palointernalservice.TPipelineFragmentParams" json:"pipeline_params,omitempty"`
+ IsMultiTable *bool `thrift:"is_multi_table,16,optional" frugal:"16,optional,bool" json:"is_multi_table,omitempty"`
+ MemtableOnSinkNode *bool `thrift:"memtable_on_sink_node,17,optional" frugal:"17,optional,bool" json:"memtable_on_sink_node,omitempty"`
+ QualifiedUser *string `thrift:"qualified_user,18,optional" frugal:"18,optional,string" json:"qualified_user,omitempty"`
+ CloudCluster *string `thrift:"cloud_cluster,19,optional" frugal:"19,optional,string" json:"cloud_cluster,omitempty"`
}
func NewTRoutineLoadTask() *TRoutineLoadTask {
@@ -1446,7 +1979,6 @@ func NewTRoutineLoadTask() *TRoutineLoadTask {
}
func (p *TRoutineLoadTask) InitDefault() {
- *p = TRoutineLoadTask{}
}
func (p *TRoutineLoadTask) GetType() (v types.TLoadSourceType) {
@@ -1572,6 +2104,33 @@ func (p *TRoutineLoadTask) GetIsMultiTable() (v bool) {
}
return *p.IsMultiTable
}
+
+var TRoutineLoadTask_MemtableOnSinkNode_DEFAULT bool
+
+func (p *TRoutineLoadTask) GetMemtableOnSinkNode() (v bool) {
+ if !p.IsSetMemtableOnSinkNode() {
+ return TRoutineLoadTask_MemtableOnSinkNode_DEFAULT
+ }
+ return *p.MemtableOnSinkNode
+}
+
+var TRoutineLoadTask_QualifiedUser_DEFAULT string
+
+func (p *TRoutineLoadTask) GetQualifiedUser() (v string) {
+ if !p.IsSetQualifiedUser() {
+ return TRoutineLoadTask_QualifiedUser_DEFAULT
+ }
+ return *p.QualifiedUser
+}
+
+var TRoutineLoadTask_CloudCluster_DEFAULT string
+
+func (p *TRoutineLoadTask) GetCloudCluster() (v string) {
+ if !p.IsSetCloudCluster() {
+ return TRoutineLoadTask_CloudCluster_DEFAULT
+ }
+ return *p.CloudCluster
+}
func (p *TRoutineLoadTask) SetType(val types.TLoadSourceType) {
p.Type = val
}
@@ -1620,6 +2179,15 @@ func (p *TRoutineLoadTask) SetPipelineParams(val *palointernalservice.TPipelineF
func (p *TRoutineLoadTask) SetIsMultiTable(val *bool) {
p.IsMultiTable = val
}
+func (p *TRoutineLoadTask) SetMemtableOnSinkNode(val *bool) {
+ p.MemtableOnSinkNode = val
+}
+func (p *TRoutineLoadTask) SetQualifiedUser(val *string) {
+ p.QualifiedUser = val
+}
+func (p *TRoutineLoadTask) SetCloudCluster(val *string) {
+ p.CloudCluster = val
+}
var fieldIDToName_TRoutineLoadTask = map[int16]string{
1: "type",
@@ -1638,6 +2206,9 @@ var fieldIDToName_TRoutineLoadTask = map[int16]string{
14: "format",
15: "pipeline_params",
16: "is_multi_table",
+ 17: "memtable_on_sink_node",
+ 18: "qualified_user",
+ 19: "cloud_cluster",
}
func (p *TRoutineLoadTask) IsSetId() bool {
@@ -1688,6 +2259,18 @@ func (p *TRoutineLoadTask) IsSetIsMultiTable() bool {
return p.IsMultiTable != nil
}
+func (p *TRoutineLoadTask) IsSetMemtableOnSinkNode() bool {
+ return p.MemtableOnSinkNode != nil
+}
+
+func (p *TRoutineLoadTask) IsSetQualifiedUser() bool {
+ return p.QualifiedUser != nil
+}
+
+func (p *TRoutineLoadTask) IsSetCloudCluster() bool {
+ return p.CloudCluster != nil
+}
+
func (p *TRoutineLoadTask) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -1718,10 +2301,8 @@ func (p *TRoutineLoadTask) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetType = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I64 {
@@ -1729,10 +2310,8 @@ func (p *TRoutineLoadTask) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetJobId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRUCT {
@@ -1740,10 +2319,8 @@ func (p *TRoutineLoadTask) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I64 {
@@ -1751,10 +2328,8 @@ func (p *TRoutineLoadTask) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTxnId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I64 {
@@ -1762,127 +2337,126 @@ func (p *TRoutineLoadTask) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetAuthCode = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.STRING {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.STRING {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.STRING {
if err = p.ReadField8(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.I64 {
if err = p.ReadField9(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.I64 {
if err = p.ReadField10(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 11:
if fieldTypeId == thrift.I64 {
if err = p.ReadField11(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 12:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField12(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 13:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField13(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 14:
if fieldTypeId == thrift.I32 {
if err = p.ReadField14(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 15:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField15(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 16:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField16(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 17:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField17(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 18:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField18(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 19:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField19(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -1934,143 +2508,201 @@ RequiredFieldNotSetError:
}
func (p *TRoutineLoadTask) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TLoadSourceType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.Type = types.TLoadSourceType(v)
+ _field = types.TLoadSourceType(v)
}
+ p.Type = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.JobId = v
+ _field = v
}
+ p.JobId = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField3(iprot thrift.TProtocol) error {
- p.Id = types.NewTUniqueId()
- if err := p.Id.Read(iprot); err != nil {
+ _field := types.NewTUniqueId()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Id = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TxnId = v
+ _field = v
}
+ p.TxnId = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.AuthCode = v
+ _field = v
}
+ p.AuthCode = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Db = &v
+ _field = &v
}
+ p.Db = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Tbl = &v
+ _field = &v
}
+ p.Tbl = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField8(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Label = &v
+ _field = &v
}
+ p.Label = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField9(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.MaxIntervalS = &v
+ _field = &v
}
+ p.MaxIntervalS = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField10(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.MaxBatchRows = &v
+ _field = &v
}
+ p.MaxBatchRows = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField11(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.MaxBatchSize = &v
+ _field = &v
}
+ p.MaxBatchSize = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField12(iprot thrift.TProtocol) error {
- p.KafkaLoadInfo = NewTKafkaLoadInfo()
- if err := p.KafkaLoadInfo.Read(iprot); err != nil {
+ _field := NewTKafkaLoadInfo()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.KafkaLoadInfo = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField13(iprot thrift.TProtocol) error {
- p.Params = palointernalservice.NewTExecPlanFragmentParams()
- if err := p.Params.Read(iprot); err != nil {
+ _field := palointernalservice.NewTExecPlanFragmentParams()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Params = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField14(iprot thrift.TProtocol) error {
+
+ var _field *plannodes.TFileFormatType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := plannodes.TFileFormatType(v)
- p.Format = &tmp
+ _field = &tmp
}
+ p.Format = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField15(iprot thrift.TProtocol) error {
- p.PipelineParams = palointernalservice.NewTPipelineFragmentParams()
- if err := p.PipelineParams.Read(iprot); err != nil {
+ _field := palointernalservice.NewTPipelineFragmentParams()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.PipelineParams = _field
return nil
}
-
func (p *TRoutineLoadTask) ReadField16(iprot thrift.TProtocol) error {
+
+ var _field *bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.IsMultiTable = _field
+ return nil
+}
+func (p *TRoutineLoadTask) ReadField17(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.IsMultiTable = &v
+ _field = &v
}
+ p.MemtableOnSinkNode = _field
+ return nil
+}
+func (p *TRoutineLoadTask) ReadField18(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.QualifiedUser = _field
+ return nil
+}
+func (p *TRoutineLoadTask) ReadField19(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.CloudCluster = _field
return nil
}
@@ -2144,7 +2776,18 @@ func (p *TRoutineLoadTask) Write(oprot thrift.TProtocol) (err error) {
fieldId = 16
goto WriteFieldError
}
-
+ if err = p.writeField17(oprot); err != nil {
+ fieldId = 17
+ goto WriteFieldError
+ }
+ if err = p.writeField18(oprot); err != nil {
+ fieldId = 18
+ goto WriteFieldError
+ }
+ if err = p.writeField19(oprot); err != nil {
+ fieldId = 19
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -2457,11 +3100,69 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err)
}
+func (p *TRoutineLoadTask) writeField17(oprot thrift.TProtocol) (err error) {
+ if p.IsSetMemtableOnSinkNode() {
+ if err = oprot.WriteFieldBegin("memtable_on_sink_node", thrift.BOOL, 17); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(*p.MemtableOnSinkNode); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 17 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err)
+}
+
+func (p *TRoutineLoadTask) writeField18(oprot thrift.TProtocol) (err error) {
+ if p.IsSetQualifiedUser() {
+ if err = oprot.WriteFieldBegin("qualified_user", thrift.STRING, 18); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.QualifiedUser); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err)
+}
+
+func (p *TRoutineLoadTask) writeField19(oprot thrift.TProtocol) (err error) {
+ if p.IsSetCloudCluster() {
+ if err = oprot.WriteFieldBegin("cloud_cluster", thrift.STRING, 19); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.CloudCluster); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err)
+}
+
func (p *TRoutineLoadTask) String() string {
if p == nil {
return ""
}
return fmt.Sprintf("TRoutineLoadTask(%+v)", *p)
+
}
func (p *TRoutineLoadTask) DeepEqual(ano *TRoutineLoadTask) bool {
@@ -2518,6 +3219,15 @@ func (p *TRoutineLoadTask) DeepEqual(ano *TRoutineLoadTask) bool {
if !p.Field16DeepEqual(ano.IsMultiTable) {
return false
}
+ if !p.Field17DeepEqual(ano.MemtableOnSinkNode) {
+ return false
+ }
+ if !p.Field18DeepEqual(ano.QualifiedUser) {
+ return false
+ }
+ if !p.Field19DeepEqual(ano.CloudCluster) {
+ return false
+ }
return true
}
@@ -2673,6 +3383,42 @@ func (p *TRoutineLoadTask) Field16DeepEqual(src *bool) bool {
}
return true
}
+func (p *TRoutineLoadTask) Field17DeepEqual(src *bool) bool {
+
+ if p.MemtableOnSinkNode == src {
+ return true
+ } else if p.MemtableOnSinkNode == nil || src == nil {
+ return false
+ }
+ if *p.MemtableOnSinkNode != *src {
+ return false
+ }
+ return true
+}
+func (p *TRoutineLoadTask) Field18DeepEqual(src *string) bool {
+
+ if p.QualifiedUser == src {
+ return true
+ } else if p.QualifiedUser == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.QualifiedUser, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *TRoutineLoadTask) Field19DeepEqual(src *string) bool {
+
+ if p.CloudCluster == src {
+ return true
+ } else if p.CloudCluster == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.CloudCluster, *src) != 0 {
+ return false
+ }
+ return true
+}
type TKafkaMetaProxyRequest struct {
KafkaInfo *TKafkaLoadInfo `thrift:"kafka_info,1,optional" frugal:"1,optional,TKafkaLoadInfo" json:"kafka_info,omitempty"`
@@ -2683,7 +3429,6 @@ func NewTKafkaMetaProxyRequest() *TKafkaMetaProxyRequest {
}
func (p *TKafkaMetaProxyRequest) InitDefault() {
- *p = TKafkaMetaProxyRequest{}
}
var TKafkaMetaProxyRequest_KafkaInfo_DEFAULT *TKafkaLoadInfo
@@ -2730,17 +3475,14 @@ func (p *TKafkaMetaProxyRequest) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -2766,10 +3508,11 @@ ReadStructEndError:
}
func (p *TKafkaMetaProxyRequest) ReadField1(iprot thrift.TProtocol) error {
- p.KafkaInfo = NewTKafkaLoadInfo()
- if err := p.KafkaInfo.Read(iprot); err != nil {
+ _field := NewTKafkaLoadInfo()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.KafkaInfo = _field
return nil
}
@@ -2783,7 +3526,6 @@ func (p *TKafkaMetaProxyRequest) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -2826,6 +3568,7 @@ func (p *TKafkaMetaProxyRequest) String() string {
return ""
}
return fmt.Sprintf("TKafkaMetaProxyRequest(%+v)", *p)
+
}
func (p *TKafkaMetaProxyRequest) DeepEqual(ano *TKafkaMetaProxyRequest) bool {
@@ -2857,7 +3600,6 @@ func NewTKafkaMetaProxyResult_() *TKafkaMetaProxyResult_ {
}
func (p *TKafkaMetaProxyResult_) InitDefault() {
- *p = TKafkaMetaProxyResult_{}
}
var TKafkaMetaProxyResult__PartitionIds_DEFAULT []int32
@@ -2904,17 +3646,14 @@ func (p *TKafkaMetaProxyResult_) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -2944,8 +3683,9 @@ func (p *TKafkaMetaProxyResult_) ReadField1(iprot thrift.TProtocol) error {
if err != nil {
return err
}
- p.PartitionIds = make([]int32, 0, size)
+ _field := make([]int32, 0, size)
for i := 0; i < size; i++ {
+
var _elem int32
if v, err := iprot.ReadI32(); err != nil {
return err
@@ -2953,11 +3693,12 @@ func (p *TKafkaMetaProxyResult_) ReadField1(iprot thrift.TProtocol) error {
_elem = v
}
- p.PartitionIds = append(p.PartitionIds, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.PartitionIds = _field
return nil
}
@@ -2971,7 +3712,6 @@ func (p *TKafkaMetaProxyResult_) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -3022,6 +3762,7 @@ func (p *TKafkaMetaProxyResult_) String() string {
return ""
}
return fmt.Sprintf("TKafkaMetaProxyResult_(%+v)", *p)
+
}
func (p *TKafkaMetaProxyResult_) DeepEqual(ano *TKafkaMetaProxyResult_) bool {
@@ -3059,7 +3800,6 @@ func NewTProxyRequest() *TProxyRequest {
}
func (p *TProxyRequest) InitDefault() {
- *p = TProxyRequest{}
}
var TProxyRequest_KafkaMetaRequest_DEFAULT *TKafkaMetaProxyRequest
@@ -3106,17 +3846,14 @@ func (p *TProxyRequest) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -3142,10 +3879,11 @@ ReadStructEndError:
}
func (p *TProxyRequest) ReadField1(iprot thrift.TProtocol) error {
- p.KafkaMetaRequest = NewTKafkaMetaProxyRequest()
- if err := p.KafkaMetaRequest.Read(iprot); err != nil {
+ _field := NewTKafkaMetaProxyRequest()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.KafkaMetaRequest = _field
return nil
}
@@ -3159,7 +3897,6 @@ func (p *TProxyRequest) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -3202,6 +3939,7 @@ func (p *TProxyRequest) String() string {
return ""
}
return fmt.Sprintf("TProxyRequest(%+v)", *p)
+
}
func (p *TProxyRequest) DeepEqual(ano *TProxyRequest) bool {
@@ -3234,7 +3972,6 @@ func NewTProxyResult_() *TProxyResult_ {
}
func (p *TProxyResult_) InitDefault() {
- *p = TProxyResult_{}
}
var TProxyResult__Status_DEFAULT *status.TStatus
@@ -3300,27 +4037,22 @@ func (p *TProxyResult_) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetStatus = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -3352,18 +4084,19 @@ RequiredFieldNotSetError:
}
func (p *TProxyResult_) ReadField1(iprot thrift.TProtocol) error {
- p.Status = status.NewTStatus()
- if err := p.Status.Read(iprot); err != nil {
+ _field := status.NewTStatus()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Status = _field
return nil
}
-
func (p *TProxyResult_) ReadField2(iprot thrift.TProtocol) error {
- p.KafkaMetaResult_ = NewTKafkaMetaProxyResult_()
- if err := p.KafkaMetaResult_.Read(iprot); err != nil {
+ _field := NewTKafkaMetaProxyResult_()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.KafkaMetaResult_ = _field
return nil
}
@@ -3381,7 +4114,6 @@ func (p *TProxyResult_) Write(oprot thrift.TProtocol) (err error) {
fieldId = 2
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -3441,6 +4173,7 @@ func (p *TProxyResult_) String() string {
return ""
}
return fmt.Sprintf("TProxyResult_(%+v)", *p)
+
}
func (p *TProxyResult_) DeepEqual(ano *TProxyResult_) bool {
@@ -3500,7 +4233,6 @@ func NewTStreamLoadRecord() *TStreamLoadRecord {
}
func (p *TStreamLoadRecord) InitDefault() {
- *p = TStreamLoadRecord{}
}
var TStreamLoadRecord_Cluster_DEFAULT string
@@ -3741,10 +4473,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRING {
@@ -3752,10 +4482,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetUser = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRING {
@@ -3763,10 +4491,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetPasswd = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.STRING {
@@ -3774,10 +4500,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetDb = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.STRING {
@@ -3785,20 +4509,16 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTbl = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.STRING {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.STRING {
@@ -3806,10 +4526,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetLabel = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.STRING {
@@ -3817,10 +4535,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetStatus = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.STRING {
@@ -3828,30 +4544,24 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetMessage = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.STRING {
if err = p.ReadField10(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 11:
if fieldTypeId == thrift.I64 {
if err = p.ReadField11(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 12:
if fieldTypeId == thrift.I64 {
@@ -3859,10 +4569,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTotalRows = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 13:
if fieldTypeId == thrift.I64 {
@@ -3870,10 +4578,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetLoadedRows = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 14:
if fieldTypeId == thrift.I64 {
@@ -3881,10 +4587,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetFilteredRows = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 15:
if fieldTypeId == thrift.I64 {
@@ -3892,10 +4596,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetUnselectedRows = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 16:
if fieldTypeId == thrift.I64 {
@@ -3903,10 +4605,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetLoadBytes = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 17:
if fieldTypeId == thrift.I64 {
@@ -3914,10 +4614,8 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetStartTime = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 18:
if fieldTypeId == thrift.I64 {
@@ -3925,27 +4623,22 @@ func (p *TStreamLoadRecord) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetFinishTime = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 19:
if fieldTypeId == thrift.STRING {
if err = p.ReadField19(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -4042,173 +4735,212 @@ RequiredFieldNotSetError:
}
func (p *TStreamLoadRecord) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Cluster = &v
+ _field = &v
}
+ p.Cluster = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.User = v
+ _field = v
}
+ p.User = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Passwd = v
+ _field = v
}
+ p.Passwd = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Db = v
+ _field = v
}
+ p.Db = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Tbl = v
+ _field = v
}
+ p.Tbl = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.UserIp = &v
+ _field = &v
}
+ p.UserIp = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Label = v
+ _field = v
}
+ p.Label = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField8(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Status = v
+ _field = v
}
+ p.Status = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField9(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Message = v
+ _field = v
}
+ p.Message = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField10(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Url = &v
+ _field = &v
}
+ p.Url = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField11(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.AuthCode = &v
+ _field = &v
}
+ p.AuthCode = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField12(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TotalRows = v
+ _field = v
}
+ p.TotalRows = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField13(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.LoadedRows = v
+ _field = v
}
+ p.LoadedRows = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField14(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.FilteredRows = v
+ _field = v
}
+ p.FilteredRows = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField15(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.UnselectedRows = v
+ _field = v
}
+ p.UnselectedRows = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField16(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.LoadBytes = v
+ _field = v
}
+ p.LoadBytes = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField17(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.StartTime = v
+ _field = v
}
+ p.StartTime = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField18(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.FinishTime = v
+ _field = v
}
+ p.FinishTime = _field
return nil
}
-
func (p *TStreamLoadRecord) ReadField19(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Comment = &v
+ _field = &v
}
+ p.Comment = _field
return nil
}
@@ -4294,7 +5026,6 @@ func (p *TStreamLoadRecord) Write(oprot thrift.TProtocol) (err error) {
fieldId = 19
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -4651,6 +5382,7 @@ func (p *TStreamLoadRecord) String() string {
return ""
}
return fmt.Sprintf("TStreamLoadRecord(%+v)", *p)
+
}
func (p *TStreamLoadRecord) DeepEqual(ano *TStreamLoadRecord) bool {
@@ -4887,7 +5619,6 @@ func NewTStreamLoadRecordResult_() *TStreamLoadRecordResult_ {
}
func (p *TStreamLoadRecordResult_) InitDefault() {
- *p = TStreamLoadRecordResult_{}
}
func (p *TStreamLoadRecordResult_) GetStreamLoadRecord() (v map[string]*TStreamLoadRecord) {
@@ -4927,17 +5658,14 @@ func (p *TStreamLoadRecordResult_) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetStreamLoadRecord = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -4973,7 +5701,8 @@ func (p *TStreamLoadRecordResult_) ReadField1(iprot thrift.TProtocol) error {
if err != nil {
return err
}
- p.StreamLoadRecord = make(map[string]*TStreamLoadRecord, size)
+ _field := make(map[string]*TStreamLoadRecord, size)
+ values := make([]TStreamLoadRecord, size)
for i := 0; i < size; i++ {
var _key string
if v, err := iprot.ReadString(); err != nil {
@@ -4981,16 +5710,19 @@ func (p *TStreamLoadRecordResult_) ReadField1(iprot thrift.TProtocol) error {
} else {
_key = v
}
- _val := NewTStreamLoadRecord()
+
+ _val := &values[i]
+ _val.InitDefault()
if err := _val.Read(iprot); err != nil {
return err
}
- p.StreamLoadRecord[_key] = _val
+ _field[_key] = _val
}
if err := iprot.ReadMapEnd(); err != nil {
return err
}
+ p.StreamLoadRecord = _field
return nil
}
@@ -5004,7 +5736,6 @@ func (p *TStreamLoadRecordResult_) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -5031,11 +5762,9 @@ func (p *TStreamLoadRecordResult_) writeField1(oprot thrift.TProtocol) (err erro
return err
}
for k, v := range p.StreamLoadRecord {
-
if err := oprot.WriteString(k); err != nil {
return err
}
-
if err := v.Write(oprot); err != nil {
return err
}
@@ -5058,6 +5787,7 @@ func (p *TStreamLoadRecordResult_) String() string {
return ""
}
return fmt.Sprintf("TStreamLoadRecordResult_(%+v)", *p)
+
}
func (p *TStreamLoadRecordResult_) DeepEqual(ano *TStreamLoadRecordResult_) bool {
@@ -5097,7 +5827,6 @@ func NewTDiskTrashInfo() *TDiskTrashInfo {
}
func (p *TDiskTrashInfo) InitDefault() {
- *p = TDiskTrashInfo{}
}
func (p *TDiskTrashInfo) GetRootPath() (v string) {
@@ -5155,10 +5884,8 @@ func (p *TDiskTrashInfo) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetRootPath = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRING {
@@ -5166,10 +5893,8 @@ func (p *TDiskTrashInfo) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetState = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
@@ -5177,17 +5902,14 @@ func (p *TDiskTrashInfo) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTrashUsedCapacity = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -5229,29 +5951,36 @@ RequiredFieldNotSetError:
}
func (p *TDiskTrashInfo) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.RootPath = v
+ _field = v
}
+ p.RootPath = _field
return nil
}
-
func (p *TDiskTrashInfo) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.State = v
+ _field = v
}
+ p.State = _field
return nil
}
-
func (p *TDiskTrashInfo) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TrashUsedCapacity = v
+ _field = v
}
+ p.TrashUsedCapacity = _field
return nil
}
@@ -5273,7 +6002,6 @@ func (p *TDiskTrashInfo) Write(oprot thrift.TProtocol) (err error) {
fieldId = 3
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -5348,6 +6076,7 @@ func (p *TDiskTrashInfo) String() string {
return ""
}
return fmt.Sprintf("TDiskTrashInfo(%+v)", *p)
+
}
func (p *TDiskTrashInfo) DeepEqual(ano *TDiskTrashInfo) bool {
@@ -5400,7 +6129,6 @@ func NewTCheckStorageFormatResult_() *TCheckStorageFormatResult_ {
}
func (p *TCheckStorageFormatResult_) InitDefault() {
- *p = TCheckStorageFormatResult_{}
}
var TCheckStorageFormatResult__V1Tablets_DEFAULT []int64
@@ -5464,27 +6192,22 @@ func (p *TCheckStorageFormatResult_) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.LIST {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -5514,8 +6237,9 @@ func (p *TCheckStorageFormatResult_) ReadField1(iprot thrift.TProtocol) error {
if err != nil {
return err
}
- p.V1Tablets = make([]int64, 0, size)
+ _field := make([]int64, 0, size)
for i := 0; i < size; i++ {
+
var _elem int64
if v, err := iprot.ReadI64(); err != nil {
return err
@@ -5523,21 +6247,22 @@ func (p *TCheckStorageFormatResult_) ReadField1(iprot thrift.TProtocol) error {
_elem = v
}
- p.V1Tablets = append(p.V1Tablets, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.V1Tablets = _field
return nil
}
-
func (p *TCheckStorageFormatResult_) ReadField2(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.V2Tablets = make([]int64, 0, size)
+ _field := make([]int64, 0, size)
for i := 0; i < size; i++ {
+
var _elem int64
if v, err := iprot.ReadI64(); err != nil {
return err
@@ -5545,11 +6270,12 @@ func (p *TCheckStorageFormatResult_) ReadField2(iprot thrift.TProtocol) error {
_elem = v
}
- p.V2Tablets = append(p.V2Tablets, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.V2Tablets = _field
return nil
}
@@ -5567,7 +6293,6 @@ func (p *TCheckStorageFormatResult_) Write(oprot thrift.TProtocol) (err error) {
fieldId = 2
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -5645,6 +6370,7 @@ func (p *TCheckStorageFormatResult_) String() string {
return ""
}
return fmt.Sprintf("TCheckStorageFormatResult_(%+v)", *p)
+
}
func (p *TCheckStorageFormatResult_) DeepEqual(ano *TCheckStorageFormatResult_) bool {
@@ -5689,168 +6415,53 @@ func (p *TCheckStorageFormatResult_) Field2DeepEqual(src []int64) bool {
return true
}
-type TIngestBinlogRequest struct {
- TxnId *int64 `thrift:"txn_id,1,optional" frugal:"1,optional,i64" json:"txn_id,omitempty"`
- RemoteTabletId *int64 `thrift:"remote_tablet_id,2,optional" frugal:"2,optional,i64" json:"remote_tablet_id,omitempty"`
- BinlogVersion *int64 `thrift:"binlog_version,3,optional" frugal:"3,optional,i64" json:"binlog_version,omitempty"`
- RemoteHost *string `thrift:"remote_host,4,optional" frugal:"4,optional,string" json:"remote_host,omitempty"`
- RemotePort *string `thrift:"remote_port,5,optional" frugal:"5,optional,string" json:"remote_port,omitempty"`
- PartitionId *int64 `thrift:"partition_id,6,optional" frugal:"6,optional,i64" json:"partition_id,omitempty"`
- LocalTabletId *int64 `thrift:"local_tablet_id,7,optional" frugal:"7,optional,i64" json:"local_tablet_id,omitempty"`
- LoadId *types.TUniqueId `thrift:"load_id,8,optional" frugal:"8,optional,types.TUniqueId" json:"load_id,omitempty"`
-}
-
-func NewTIngestBinlogRequest() *TIngestBinlogRequest {
- return &TIngestBinlogRequest{}
-}
-
-func (p *TIngestBinlogRequest) InitDefault() {
- *p = TIngestBinlogRequest{}
-}
-
-var TIngestBinlogRequest_TxnId_DEFAULT int64
-
-func (p *TIngestBinlogRequest) GetTxnId() (v int64) {
- if !p.IsSetTxnId() {
- return TIngestBinlogRequest_TxnId_DEFAULT
- }
- return *p.TxnId
-}
-
-var TIngestBinlogRequest_RemoteTabletId_DEFAULT int64
-
-func (p *TIngestBinlogRequest) GetRemoteTabletId() (v int64) {
- if !p.IsSetRemoteTabletId() {
- return TIngestBinlogRequest_RemoteTabletId_DEFAULT
- }
- return *p.RemoteTabletId
-}
-
-var TIngestBinlogRequest_BinlogVersion_DEFAULT int64
-
-func (p *TIngestBinlogRequest) GetBinlogVersion() (v int64) {
- if !p.IsSetBinlogVersion() {
- return TIngestBinlogRequest_BinlogVersion_DEFAULT
- }
- return *p.BinlogVersion
-}
-
-var TIngestBinlogRequest_RemoteHost_DEFAULT string
-
-func (p *TIngestBinlogRequest) GetRemoteHost() (v string) {
- if !p.IsSetRemoteHost() {
- return TIngestBinlogRequest_RemoteHost_DEFAULT
- }
- return *p.RemoteHost
-}
-
-var TIngestBinlogRequest_RemotePort_DEFAULT string
-
-func (p *TIngestBinlogRequest) GetRemotePort() (v string) {
- if !p.IsSetRemotePort() {
- return TIngestBinlogRequest_RemotePort_DEFAULT
- }
- return *p.RemotePort
-}
-
-var TIngestBinlogRequest_PartitionId_DEFAULT int64
-
-func (p *TIngestBinlogRequest) GetPartitionId() (v int64) {
- if !p.IsSetPartitionId() {
- return TIngestBinlogRequest_PartitionId_DEFAULT
- }
- return *p.PartitionId
-}
-
-var TIngestBinlogRequest_LocalTabletId_DEFAULT int64
-
-func (p *TIngestBinlogRequest) GetLocalTabletId() (v int64) {
- if !p.IsSetLocalTabletId() {
- return TIngestBinlogRequest_LocalTabletId_DEFAULT
- }
- return *p.LocalTabletId
-}
-
-var TIngestBinlogRequest_LoadId_DEFAULT *types.TUniqueId
-
-func (p *TIngestBinlogRequest) GetLoadId() (v *types.TUniqueId) {
- if !p.IsSetLoadId() {
- return TIngestBinlogRequest_LoadId_DEFAULT
- }
- return p.LoadId
-}
-func (p *TIngestBinlogRequest) SetTxnId(val *int64) {
- p.TxnId = val
-}
-func (p *TIngestBinlogRequest) SetRemoteTabletId(val *int64) {
- p.RemoteTabletId = val
-}
-func (p *TIngestBinlogRequest) SetBinlogVersion(val *int64) {
- p.BinlogVersion = val
-}
-func (p *TIngestBinlogRequest) SetRemoteHost(val *string) {
- p.RemoteHost = val
-}
-func (p *TIngestBinlogRequest) SetRemotePort(val *string) {
- p.RemotePort = val
-}
-func (p *TIngestBinlogRequest) SetPartitionId(val *int64) {
- p.PartitionId = val
-}
-func (p *TIngestBinlogRequest) SetLocalTabletId(val *int64) {
- p.LocalTabletId = val
-}
-func (p *TIngestBinlogRequest) SetLoadId(val *types.TUniqueId) {
- p.LoadId = val
+type TWarmUpCacheAsyncRequest struct {
+ Host string `thrift:"host,1,required" frugal:"1,required,string" json:"host"`
+ BrpcPort int32 `thrift:"brpc_port,2,required" frugal:"2,required,i32" json:"brpc_port"`
+ TabletIds []int64 `thrift:"tablet_ids,3,required" frugal:"3,required,list" json:"tablet_ids"`
}
-var fieldIDToName_TIngestBinlogRequest = map[int16]string{
- 1: "txn_id",
- 2: "remote_tablet_id",
- 3: "binlog_version",
- 4: "remote_host",
- 5: "remote_port",
- 6: "partition_id",
- 7: "local_tablet_id",
- 8: "load_id",
+func NewTWarmUpCacheAsyncRequest() *TWarmUpCacheAsyncRequest {
+ return &TWarmUpCacheAsyncRequest{}
}
-func (p *TIngestBinlogRequest) IsSetTxnId() bool {
- return p.TxnId != nil
+func (p *TWarmUpCacheAsyncRequest) InitDefault() {
}
-func (p *TIngestBinlogRequest) IsSetRemoteTabletId() bool {
- return p.RemoteTabletId != nil
+func (p *TWarmUpCacheAsyncRequest) GetHost() (v string) {
+ return p.Host
}
-func (p *TIngestBinlogRequest) IsSetBinlogVersion() bool {
- return p.BinlogVersion != nil
+func (p *TWarmUpCacheAsyncRequest) GetBrpcPort() (v int32) {
+ return p.BrpcPort
}
-func (p *TIngestBinlogRequest) IsSetRemoteHost() bool {
- return p.RemoteHost != nil
+func (p *TWarmUpCacheAsyncRequest) GetTabletIds() (v []int64) {
+ return p.TabletIds
}
-
-func (p *TIngestBinlogRequest) IsSetRemotePort() bool {
- return p.RemotePort != nil
+func (p *TWarmUpCacheAsyncRequest) SetHost(val string) {
+ p.Host = val
}
-
-func (p *TIngestBinlogRequest) IsSetPartitionId() bool {
- return p.PartitionId != nil
+func (p *TWarmUpCacheAsyncRequest) SetBrpcPort(val int32) {
+ p.BrpcPort = val
}
-
-func (p *TIngestBinlogRequest) IsSetLocalTabletId() bool {
- return p.LocalTabletId != nil
+func (p *TWarmUpCacheAsyncRequest) SetTabletIds(val []int64) {
+ p.TabletIds = val
}
-func (p *TIngestBinlogRequest) IsSetLoadId() bool {
- return p.LoadId != nil
+var fieldIDToName_TWarmUpCacheAsyncRequest = map[int16]string{
+ 1: "host",
+ 2: "brpc_port",
+ 3: "tablet_ids",
}
-func (p *TIngestBinlogRequest) Read(iprot thrift.TProtocol) (err error) {
+func (p *TWarmUpCacheAsyncRequest) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
+ var issetHost bool = false
+ var issetBrpcPort bool = false
+ var issetTabletIds bool = false
if _, err = iprot.ReadStructBegin(); err != nil {
goto ReadStructBeginError
@@ -5867,91 +6478,37 @@ func (p *TIngestBinlogRequest) Read(iprot thrift.TProtocol) (err error) {
switch fieldId {
case 1:
- if fieldTypeId == thrift.I64 {
+ if fieldTypeId == thrift.STRING {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ issetHost = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
- if fieldTypeId == thrift.I64 {
+ if fieldTypeId == thrift.I32 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ issetBrpcPort = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
- if fieldTypeId == thrift.I64 {
+ if fieldTypeId == thrift.LIST {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ issetTabletIds = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
- case 4:
- if fieldTypeId == thrift.STRING {
- if err = p.ReadField4(iprot); err != nil {
- goto ReadFieldError
- }
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- case 5:
- if fieldTypeId == thrift.STRING {
- if err = p.ReadField5(iprot); err != nil {
- goto ReadFieldError
- }
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- case 6:
- if fieldTypeId == thrift.I64 {
- if err = p.ReadField6(iprot); err != nil {
- goto ReadFieldError
- }
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- case 7:
- if fieldTypeId == thrift.I64 {
- if err = p.ReadField7(iprot); err != nil {
- goto ReadFieldError
- }
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- case 8:
- if fieldTypeId == thrift.STRUCT {
- if err = p.ReadField8(iprot); err != nil {
- goto ReadFieldError
- }
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- default:
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -5960,13 +6517,27 @@ func (p *TIngestBinlogRequest) Read(iprot thrift.TProtocol) (err error) {
goto ReadStructEndError
}
+ if !issetHost {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetBrpcPort {
+ fieldId = 2
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetTabletIds {
+ fieldId = 3
+ goto RequiredFieldNotSetError
+ }
return nil
ReadStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogRequest[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpCacheAsyncRequest[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -5974,82 +6545,59 @@ ReadFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpCacheAsyncRequest[fieldId]))
}
-func (p *TIngestBinlogRequest) ReadField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- p.TxnId = &v
- }
- return nil
-}
-
-func (p *TIngestBinlogRequest) ReadField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- p.RemoteTabletId = &v
- }
- return nil
-}
-
-func (p *TIngestBinlogRequest) ReadField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- p.BinlogVersion = &v
- }
- return nil
-}
+func (p *TWarmUpCacheAsyncRequest) ReadField1(iprot thrift.TProtocol) error {
-func (p *TIngestBinlogRequest) ReadField4(iprot thrift.TProtocol) error {
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.RemoteHost = &v
+ _field = v
}
+ p.Host = _field
return nil
}
+func (p *TWarmUpCacheAsyncRequest) ReadField2(iprot thrift.TProtocol) error {
-func (p *TIngestBinlogRequest) ReadField5(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
+ var _field int32
+ if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.RemotePort = &v
+ _field = v
}
+ p.BrpcPort = _field
return nil
}
-
-func (p *TIngestBinlogRequest) ReadField6(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
+func (p *TWarmUpCacheAsyncRequest) ReadField3(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
return err
- } else {
- p.PartitionId = &v
}
- return nil
-}
+ _field := make([]int64, 0, size)
+ for i := 0; i < size; i++ {
-func (p *TIngestBinlogRequest) ReadField7(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return err
- } else {
- p.LocalTabletId = &v
- }
- return nil
-}
+ var _elem int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
-func (p *TIngestBinlogRequest) ReadField8(iprot thrift.TProtocol) error {
- p.LoadId = types.NewTUniqueId()
- if err := p.LoadId.Read(iprot); err != nil {
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.TabletIds = _field
return nil
}
-func (p *TIngestBinlogRequest) Write(oprot thrift.TProtocol) (err error) {
+func (p *TWarmUpCacheAsyncRequest) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("TIngestBinlogRequest"); err != nil {
+ if err = oprot.WriteStructBegin("TWarmUpCacheAsyncRequest"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -6065,27 +6613,6 @@ func (p *TIngestBinlogRequest) Write(oprot thrift.TProtocol) (err error) {
fieldId = 3
goto WriteFieldError
}
- if err = p.writeField4(oprot); err != nil {
- fieldId = 4
- goto WriteFieldError
- }
- if err = p.writeField5(oprot); err != nil {
- fieldId = 5
- goto WriteFieldError
- }
- if err = p.writeField6(oprot); err != nil {
- fieldId = 6
- goto WriteFieldError
- }
- if err = p.writeField7(oprot); err != nil {
- fieldId = 7
- goto WriteFieldError
- }
- if err = p.writeField8(oprot); err != nil {
- fieldId = 8
- goto WriteFieldError
- }
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -6104,17 +6631,15 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *TIngestBinlogRequest) writeField1(oprot thrift.TProtocol) (err error) {
- if p.IsSetTxnId() {
- if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 1); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(*p.TxnId); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
+func (p *TWarmUpCacheAsyncRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("host", thrift.STRING, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(p.Host); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
}
return nil
WriteFieldBeginError:
@@ -6123,17 +6648,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *TIngestBinlogRequest) writeField2(oprot thrift.TProtocol) (err error) {
- if p.IsSetRemoteTabletId() {
- if err = oprot.WriteFieldBegin("remote_tablet_id", thrift.I64, 2); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(*p.RemoteTabletId); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
+func (p *TWarmUpCacheAsyncRequest) writeField2(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("brpc_port", thrift.I32, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(p.BrpcPort); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
}
return nil
WriteFieldBeginError:
@@ -6142,17 +6665,23 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
}
-func (p *TIngestBinlogRequest) writeField3(oprot thrift.TProtocol) (err error) {
- if p.IsSetBinlogVersion() {
- if err = oprot.WriteFieldBegin("binlog_version", thrift.I64, 3); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(*p.BinlogVersion); err != nil {
+func (p *TWarmUpCacheAsyncRequest) writeField3(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("tablet_ids", thrift.LIST, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.I64, len(p.TabletIds)); err != nil {
+ return err
+ }
+ for _, v := range p.TabletIds {
+ if err := oprot.WriteI64(v); err != nil {
return err
}
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
}
return nil
WriteFieldBeginError:
@@ -6161,269 +6690,96 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
}
-func (p *TIngestBinlogRequest) writeField4(oprot thrift.TProtocol) (err error) {
- if p.IsSetRemoteHost() {
- if err = oprot.WriteFieldBegin("remote_host", thrift.STRING, 4); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteString(*p.RemoteHost); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
+func (p *TWarmUpCacheAsyncRequest) String() string {
+ if p == nil {
+ return ""
}
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
-}
+ return fmt.Sprintf("TWarmUpCacheAsyncRequest(%+v)", *p)
-func (p *TIngestBinlogRequest) writeField5(oprot thrift.TProtocol) (err error) {
- if p.IsSetRemotePort() {
- if err = oprot.WriteFieldBegin("remote_port", thrift.STRING, 5); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteString(*p.RemotePort); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
- }
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err)
}
-func (p *TIngestBinlogRequest) writeField6(oprot thrift.TProtocol) (err error) {
- if p.IsSetPartitionId() {
- if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 6); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(*p.PartitionId); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
+func (p *TWarmUpCacheAsyncRequest) DeepEqual(ano *TWarmUpCacheAsyncRequest) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
}
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err)
+ if !p.Field1DeepEqual(ano.Host) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.BrpcPort) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.TabletIds) {
+ return false
+ }
+ return true
}
-func (p *TIngestBinlogRequest) writeField7(oprot thrift.TProtocol) (err error) {
- if p.IsSetLocalTabletId() {
- if err = oprot.WriteFieldBegin("local_tablet_id", thrift.I64, 7); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteI64(*p.LocalTabletId); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
+func (p *TWarmUpCacheAsyncRequest) Field1DeepEqual(src string) bool {
+
+ if strings.Compare(p.Host, src) != 0 {
+ return false
}
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err)
+ return true
}
+func (p *TWarmUpCacheAsyncRequest) Field2DeepEqual(src int32) bool {
-func (p *TIngestBinlogRequest) writeField8(oprot thrift.TProtocol) (err error) {
- if p.IsSetLoadId() {
- if err = oprot.WriteFieldBegin("load_id", thrift.STRUCT, 8); err != nil {
- goto WriteFieldBeginError
- }
- if err := p.LoadId.Write(oprot); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
- }
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err)
-}
-
-func (p *TIngestBinlogRequest) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("TIngestBinlogRequest(%+v)", *p)
-}
-
-func (p *TIngestBinlogRequest) DeepEqual(ano *TIngestBinlogRequest) bool {
- if p == ano {
- return true
- } else if p == nil || ano == nil {
- return false
- }
- if !p.Field1DeepEqual(ano.TxnId) {
- return false
- }
- if !p.Field2DeepEqual(ano.RemoteTabletId) {
- return false
- }
- if !p.Field3DeepEqual(ano.BinlogVersion) {
- return false
- }
- if !p.Field4DeepEqual(ano.RemoteHost) {
- return false
- }
- if !p.Field5DeepEqual(ano.RemotePort) {
- return false
- }
- if !p.Field6DeepEqual(ano.PartitionId) {
- return false
- }
- if !p.Field7DeepEqual(ano.LocalTabletId) {
- return false
- }
- if !p.Field8DeepEqual(ano.LoadId) {
- return false
- }
- return true
-}
-
-func (p *TIngestBinlogRequest) Field1DeepEqual(src *int64) bool {
-
- if p.TxnId == src {
- return true
- } else if p.TxnId == nil || src == nil {
- return false
- }
- if *p.TxnId != *src {
- return false
- }
- return true
-}
-func (p *TIngestBinlogRequest) Field2DeepEqual(src *int64) bool {
-
- if p.RemoteTabletId == src {
- return true
- } else if p.RemoteTabletId == nil || src == nil {
- return false
- }
- if *p.RemoteTabletId != *src {
- return false
- }
- return true
-}
-func (p *TIngestBinlogRequest) Field3DeepEqual(src *int64) bool {
-
- if p.BinlogVersion == src {
- return true
- } else if p.BinlogVersion == nil || src == nil {
- return false
- }
- if *p.BinlogVersion != *src {
- return false
- }
- return true
-}
-func (p *TIngestBinlogRequest) Field4DeepEqual(src *string) bool {
-
- if p.RemoteHost == src {
- return true
- } else if p.RemoteHost == nil || src == nil {
- return false
- }
- if strings.Compare(*p.RemoteHost, *src) != 0 {
- return false
- }
- return true
-}
-func (p *TIngestBinlogRequest) Field5DeepEqual(src *string) bool {
-
- if p.RemotePort == src {
- return true
- } else if p.RemotePort == nil || src == nil {
- return false
- }
- if strings.Compare(*p.RemotePort, *src) != 0 {
- return false
- }
- return true
-}
-func (p *TIngestBinlogRequest) Field6DeepEqual(src *int64) bool {
-
- if p.PartitionId == src {
- return true
- } else if p.PartitionId == nil || src == nil {
- return false
- }
- if *p.PartitionId != *src {
+ if p.BrpcPort != src {
return false
}
return true
}
-func (p *TIngestBinlogRequest) Field7DeepEqual(src *int64) bool {
+func (p *TWarmUpCacheAsyncRequest) Field3DeepEqual(src []int64) bool {
- if p.LocalTabletId == src {
- return true
- } else if p.LocalTabletId == nil || src == nil {
- return false
- }
- if *p.LocalTabletId != *src {
+ if len(p.TabletIds) != len(src) {
return false
}
- return true
-}
-func (p *TIngestBinlogRequest) Field8DeepEqual(src *types.TUniqueId) bool {
-
- if !p.LoadId.DeepEqual(src) {
- return false
+ for i, v := range p.TabletIds {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
}
return true
}
-type TIngestBinlogResult_ struct {
- Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"`
+type TWarmUpCacheAsyncResponse struct {
+ Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"`
}
-func NewTIngestBinlogResult_() *TIngestBinlogResult_ {
- return &TIngestBinlogResult_{}
+func NewTWarmUpCacheAsyncResponse() *TWarmUpCacheAsyncResponse {
+ return &TWarmUpCacheAsyncResponse{}
}
-func (p *TIngestBinlogResult_) InitDefault() {
- *p = TIngestBinlogResult_{}
+func (p *TWarmUpCacheAsyncResponse) InitDefault() {
}
-var TIngestBinlogResult__Status_DEFAULT *status.TStatus
+var TWarmUpCacheAsyncResponse_Status_DEFAULT *status.TStatus
-func (p *TIngestBinlogResult_) GetStatus() (v *status.TStatus) {
+func (p *TWarmUpCacheAsyncResponse) GetStatus() (v *status.TStatus) {
if !p.IsSetStatus() {
- return TIngestBinlogResult__Status_DEFAULT
+ return TWarmUpCacheAsyncResponse_Status_DEFAULT
}
return p.Status
}
-func (p *TIngestBinlogResult_) SetStatus(val *status.TStatus) {
+func (p *TWarmUpCacheAsyncResponse) SetStatus(val *status.TStatus) {
p.Status = val
}
-var fieldIDToName_TIngestBinlogResult_ = map[int16]string{
+var fieldIDToName_TWarmUpCacheAsyncResponse = map[int16]string{
1: "status",
}
-func (p *TIngestBinlogResult_) IsSetStatus() bool {
+func (p *TWarmUpCacheAsyncResponse) IsSetStatus() bool {
return p.Status != nil
}
-func (p *TIngestBinlogResult_) Read(iprot thrift.TProtocol) (err error) {
+func (p *TWarmUpCacheAsyncResponse) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
+ var issetStatus bool = false
if _, err = iprot.ReadStructBegin(); err != nil {
goto ReadStructBeginError
@@ -6444,17 +6800,15 @@ func (p *TIngestBinlogResult_) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ issetStatus = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -6463,13 +6817,17 @@ func (p *TIngestBinlogResult_) Read(iprot thrift.TProtocol) (err error) {
goto ReadStructEndError
}
+ if !issetStatus {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
return nil
ReadStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogResult_[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpCacheAsyncResponse[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -6477,19 +6835,22 @@ ReadFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpCacheAsyncResponse[fieldId]))
}
-func (p *TIngestBinlogResult_) ReadField1(iprot thrift.TProtocol) error {
- p.Status = status.NewTStatus()
- if err := p.Status.Read(iprot); err != nil {
+func (p *TWarmUpCacheAsyncResponse) ReadField1(iprot thrift.TProtocol) error {
+ _field := status.NewTStatus()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Status = _field
return nil
}
-func (p *TIngestBinlogResult_) Write(oprot thrift.TProtocol) (err error) {
+func (p *TWarmUpCacheAsyncResponse) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("TIngestBinlogResult"); err != nil {
+ if err = oprot.WriteStructBegin("TWarmUpCacheAsyncResponse"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -6497,7 +6858,6 @@ func (p *TIngestBinlogResult_) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -6516,17 +6876,15 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *TIngestBinlogResult_) writeField1(oprot thrift.TProtocol) (err error) {
- if p.IsSetStatus() {
- if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil {
- goto WriteFieldBeginError
- }
- if err := p.Status.Write(oprot); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
+func (p *TWarmUpCacheAsyncResponse) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Status.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
}
return nil
WriteFieldBeginError:
@@ -6535,14 +6893,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *TIngestBinlogResult_) String() string {
+func (p *TWarmUpCacheAsyncResponse) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("TIngestBinlogResult_(%+v)", *p)
+ return fmt.Sprintf("TWarmUpCacheAsyncResponse(%+v)", *p)
+
}
-func (p *TIngestBinlogResult_) DeepEqual(ano *TIngestBinlogResult_) bool {
+func (p *TWarmUpCacheAsyncResponse) DeepEqual(ano *TWarmUpCacheAsyncResponse) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -6554,7 +6913,7 @@ func (p *TIngestBinlogResult_) DeepEqual(ano *TIngestBinlogResult_) bool {
return true
}
-func (p *TIngestBinlogResult_) Field1DeepEqual(src *status.TStatus) bool {
+func (p *TWarmUpCacheAsyncResponse) Field1DeepEqual(src *status.TStatus) bool {
if !p.Status.DeepEqual(src) {
return false
@@ -6562,1333 +6921,12966 @@ func (p *TIngestBinlogResult_) Field1DeepEqual(src *status.TStatus) bool {
return true
}
-type BackendService interface {
- ExecPlanFragment(ctx context.Context, params *palointernalservice.TExecPlanFragmentParams) (r *palointernalservice.TExecPlanFragmentResult_, err error)
-
- CancelPlanFragment(ctx context.Context, params *palointernalservice.TCancelPlanFragmentParams) (r *palointernalservice.TCancelPlanFragmentResult_, err error)
-
- TransmitData(ctx context.Context, params *palointernalservice.TTransmitDataParams) (r *palointernalservice.TTransmitDataResult_, err error)
-
- SubmitTasks(ctx context.Context, tasks []*agentservice.TAgentTaskRequest) (r *agentservice.TAgentResult_, err error)
-
- MakeSnapshot(ctx context.Context, snapshotRequest *agentservice.TSnapshotRequest) (r *agentservice.TAgentResult_, err error)
-
- ReleaseSnapshot(ctx context.Context, snapshotPath string) (r *agentservice.TAgentResult_, err error)
-
- PublishClusterState(ctx context.Context, request *agentservice.TAgentPublishRequest) (r *agentservice.TAgentResult_, err error)
-
- SubmitExportTask(ctx context.Context, request *TExportTaskRequest) (r *status.TStatus, err error)
-
- GetExportStatus(ctx context.Context, taskId *types.TUniqueId) (r *palointernalservice.TExportStatusResult_, err error)
-
- EraseExportTask(ctx context.Context, taskId *types.TUniqueId) (r *status.TStatus, err error)
-
- GetTabletStat(ctx context.Context) (r *TTabletStatResult_, err error)
-
- GetTrashUsedCapacity(ctx context.Context) (r int64, err error)
-
- GetDiskTrashUsedCapacity(ctx context.Context) (r []*TDiskTrashInfo, err error)
-
- SubmitRoutineLoadTask(ctx context.Context, tasks []*TRoutineLoadTask) (r *status.TStatus, err error)
-
- OpenScanner(ctx context.Context, params *dorisexternalservice.TScanOpenParams) (r *dorisexternalservice.TScanOpenResult_, err error)
-
- GetNext(ctx context.Context, params *dorisexternalservice.TScanNextBatchParams) (r *dorisexternalservice.TScanBatchResult_, err error)
-
- CloseScanner(ctx context.Context, params *dorisexternalservice.TScanCloseParams) (r *dorisexternalservice.TScanCloseResult_, err error)
-
- GetStreamLoadRecord(ctx context.Context, lastStreamRecordTime int64) (r *TStreamLoadRecordResult_, err error)
-
- CleanTrash(ctx context.Context) (err error)
-
- CheckStorageFormat(ctx context.Context) (r *TCheckStorageFormatResult_, err error)
-
- IngestBinlog(ctx context.Context, ingestBinlogRequest *TIngestBinlogRequest) (r *TIngestBinlogResult_, err error)
+type TCheckWarmUpCacheAsyncRequest struct {
+ Tablets []int64 `thrift:"tablets,1,optional" frugal:"1,optional,list" json:"tablets,omitempty"`
}
-type BackendServiceClient struct {
- c thrift.TClient
+func NewTCheckWarmUpCacheAsyncRequest() *TCheckWarmUpCacheAsyncRequest {
+ return &TCheckWarmUpCacheAsyncRequest{}
}
-func NewBackendServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BackendServiceClient {
- return &BackendServiceClient{
- c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
- }
+func (p *TCheckWarmUpCacheAsyncRequest) InitDefault() {
}
-func NewBackendServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BackendServiceClient {
- return &BackendServiceClient{
- c: thrift.NewTStandardClient(iprot, oprot),
+var TCheckWarmUpCacheAsyncRequest_Tablets_DEFAULT []int64
+
+func (p *TCheckWarmUpCacheAsyncRequest) GetTablets() (v []int64) {
+ if !p.IsSetTablets() {
+ return TCheckWarmUpCacheAsyncRequest_Tablets_DEFAULT
}
+ return p.Tablets
+}
+func (p *TCheckWarmUpCacheAsyncRequest) SetTablets(val []int64) {
+ p.Tablets = val
}
-func NewBackendServiceClient(c thrift.TClient) *BackendServiceClient {
- return &BackendServiceClient{
- c: c,
- }
+var fieldIDToName_TCheckWarmUpCacheAsyncRequest = map[int16]string{
+ 1: "tablets",
}
-func (p *BackendServiceClient) Client_() thrift.TClient {
- return p.c
+func (p *TCheckWarmUpCacheAsyncRequest) IsSetTablets() bool {
+ return p.Tablets != nil
}
-func (p *BackendServiceClient) ExecPlanFragment(ctx context.Context, params *palointernalservice.TExecPlanFragmentParams) (r *palointernalservice.TExecPlanFragmentResult_, err error) {
- var _args BackendServiceExecPlanFragmentArgs
- _args.Params = params
- var _result BackendServiceExecPlanFragmentResult
- if err = p.Client_().Call(ctx, "exec_plan_fragment", &_args, &_result); err != nil {
- return
+func (p *TCheckWarmUpCacheAsyncRequest) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
}
- return _result.GetSuccess(), nil
-}
-func (p *BackendServiceClient) CancelPlanFragment(ctx context.Context, params *palointernalservice.TCancelPlanFragmentParams) (r *palointernalservice.TCancelPlanFragmentResult_, err error) {
- var _args BackendServiceCancelPlanFragmentArgs
- _args.Params = params
- var _result BackendServiceCancelPlanFragmentResult
- if err = p.Client_().Call(ctx, "cancel_plan_fragment", &_args, &_result); err != nil {
- return
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
}
- return _result.GetSuccess(), nil
-}
-func (p *BackendServiceClient) TransmitData(ctx context.Context, params *palointernalservice.TTransmitDataParams) (r *palointernalservice.TTransmitDataResult_, err error) {
- var _args BackendServiceTransmitDataArgs
- _args.Params = params
- var _result BackendServiceTransmitDataResult
- if err = p.Client_().Call(ctx, "transmit_data", &_args, &_result); err != nil {
- return
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
}
- return _result.GetSuccess(), nil
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckWarmUpCacheAsyncRequest[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceClient) SubmitTasks(ctx context.Context, tasks []*agentservice.TAgentTaskRequest) (r *agentservice.TAgentResult_, err error) {
- var _args BackendServiceSubmitTasksArgs
- _args.Tasks = tasks
- var _result BackendServiceSubmitTasksResult
- if err = p.Client_().Call(ctx, "submit_tasks", &_args, &_result); err != nil {
- return
+
+func (p *TCheckWarmUpCacheAsyncRequest) ReadField1(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
}
- return _result.GetSuccess(), nil
-}
-func (p *BackendServiceClient) MakeSnapshot(ctx context.Context, snapshotRequest *agentservice.TSnapshotRequest) (r *agentservice.TAgentResult_, err error) {
- var _args BackendServiceMakeSnapshotArgs
- _args.SnapshotRequest = snapshotRequest
- var _result BackendServiceMakeSnapshotResult
- if err = p.Client_().Call(ctx, "make_snapshot", &_args, &_result); err != nil {
- return
+ _field := make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+
+ var _elem int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
}
- return _result.GetSuccess(), nil
-}
-func (p *BackendServiceClient) ReleaseSnapshot(ctx context.Context, snapshotPath string) (r *agentservice.TAgentResult_, err error) {
- var _args BackendServiceReleaseSnapshotArgs
- _args.SnapshotPath = snapshotPath
- var _result BackendServiceReleaseSnapshotResult
- if err = p.Client_().Call(ctx, "release_snapshot", &_args, &_result); err != nil {
- return
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
}
- return _result.GetSuccess(), nil
+ p.Tablets = _field
+ return nil
}
-func (p *BackendServiceClient) PublishClusterState(ctx context.Context, request *agentservice.TAgentPublishRequest) (r *agentservice.TAgentResult_, err error) {
- var _args BackendServicePublishClusterStateArgs
- _args.Request = request
- var _result BackendServicePublishClusterStateResult
- if err = p.Client_().Call(ctx, "publish_cluster_state", &_args, &_result); err != nil {
- return
+
+func (p *TCheckWarmUpCacheAsyncRequest) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TCheckWarmUpCacheAsyncRequest"); err != nil {
+ goto WriteStructBeginError
}
- return _result.GetSuccess(), nil
-}
-func (p *BackendServiceClient) SubmitExportTask(ctx context.Context, request *TExportTaskRequest) (r *status.TStatus, err error) {
- var _args BackendServiceSubmitExportTaskArgs
- _args.Request = request
- var _result BackendServiceSubmitExportTaskResult
- if err = p.Client_().Call(ctx, "submit_export_task", &_args, &_result); err != nil {
- return
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
}
- return _result.GetSuccess(), nil
-}
-func (p *BackendServiceClient) GetExportStatus(ctx context.Context, taskId *types.TUniqueId) (r *palointernalservice.TExportStatusResult_, err error) {
- var _args BackendServiceGetExportStatusArgs
- _args.TaskId = taskId
- var _result BackendServiceGetExportStatusResult
- if err = p.Client_().Call(ctx, "get_export_status", &_args, &_result); err != nil {
- return
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
}
- return _result.GetSuccess(), nil
-}
-func (p *BackendServiceClient) EraseExportTask(ctx context.Context, taskId *types.TUniqueId) (r *status.TStatus, err error) {
- var _args BackendServiceEraseExportTaskArgs
- _args.TaskId = taskId
- var _result BackendServiceEraseExportTaskResult
- if err = p.Client_().Call(ctx, "erase_export_task", &_args, &_result); err != nil {
- return
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
}
- return _result.GetSuccess(), nil
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceClient) GetTabletStat(ctx context.Context) (r *TTabletStatResult_, err error) {
- var _args BackendServiceGetTabletStatArgs
- var _result BackendServiceGetTabletStatResult
- if err = p.Client_().Call(ctx, "get_tablet_stat", &_args, &_result); err != nil {
- return
+
+func (p *TCheckWarmUpCacheAsyncRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTablets() {
+ if err = oprot.WriteFieldBegin("tablets", thrift.LIST, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.I64, len(p.Tablets)); err != nil {
+ return err
+ }
+ for _, v := range p.Tablets {
+ if err := oprot.WriteI64(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
}
- return _result.GetSuccess(), nil
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServiceClient) GetTrashUsedCapacity(ctx context.Context) (r int64, err error) {
- var _args BackendServiceGetTrashUsedCapacityArgs
- var _result BackendServiceGetTrashUsedCapacityResult
- if err = p.Client_().Call(ctx, "get_trash_used_capacity", &_args, &_result); err != nil {
- return
+
+func (p *TCheckWarmUpCacheAsyncRequest) String() string {
+ if p == nil {
+ return ""
}
- return _result.GetSuccess(), nil
+ return fmt.Sprintf("TCheckWarmUpCacheAsyncRequest(%+v)", *p)
+
}
-func (p *BackendServiceClient) GetDiskTrashUsedCapacity(ctx context.Context) (r []*TDiskTrashInfo, err error) {
- var _args BackendServiceGetDiskTrashUsedCapacityArgs
- var _result BackendServiceGetDiskTrashUsedCapacityResult
- if err = p.Client_().Call(ctx, "get_disk_trash_used_capacity", &_args, &_result); err != nil {
- return
+
+func (p *TCheckWarmUpCacheAsyncRequest) DeepEqual(ano *TCheckWarmUpCacheAsyncRequest) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
}
- return _result.GetSuccess(), nil
-}
-func (p *BackendServiceClient) SubmitRoutineLoadTask(ctx context.Context, tasks []*TRoutineLoadTask) (r *status.TStatus, err error) {
- var _args BackendServiceSubmitRoutineLoadTaskArgs
- _args.Tasks = tasks
- var _result BackendServiceSubmitRoutineLoadTaskResult
- if err = p.Client_().Call(ctx, "submit_routine_load_task", &_args, &_result); err != nil {
- return
+ if !p.Field1DeepEqual(ano.Tablets) {
+ return false
}
- return _result.GetSuccess(), nil
+ return true
}
-func (p *BackendServiceClient) OpenScanner(ctx context.Context, params *dorisexternalservice.TScanOpenParams) (r *dorisexternalservice.TScanOpenResult_, err error) {
- var _args BackendServiceOpenScannerArgs
- _args.Params = params
- var _result BackendServiceOpenScannerResult
- if err = p.Client_().Call(ctx, "open_scanner", &_args, &_result); err != nil {
- return
+
+func (p *TCheckWarmUpCacheAsyncRequest) Field1DeepEqual(src []int64) bool {
+
+ if len(p.Tablets) != len(src) {
+ return false
}
- return _result.GetSuccess(), nil
-}
-func (p *BackendServiceClient) GetNext(ctx context.Context, params *dorisexternalservice.TScanNextBatchParams) (r *dorisexternalservice.TScanBatchResult_, err error) {
- var _args BackendServiceGetNextArgs
- _args.Params = params
- var _result BackendServiceGetNextResult
- if err = p.Client_().Call(ctx, "get_next", &_args, &_result); err != nil {
- return
+ for i, v := range p.Tablets {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
}
- return _result.GetSuccess(), nil
+ return true
}
-func (p *BackendServiceClient) CloseScanner(ctx context.Context, params *dorisexternalservice.TScanCloseParams) (r *dorisexternalservice.TScanCloseResult_, err error) {
- var _args BackendServiceCloseScannerArgs
- _args.Params = params
- var _result BackendServiceCloseScannerResult
- if err = p.Client_().Call(ctx, "close_scanner", &_args, &_result); err != nil {
- return
+
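+// TCheckWarmUpCacheAsyncResponse is the response for the asynchronous warm-up
+// cache check: Status is required, and the optional TaskDone map presumably
+// records, per tablet id, whether its warm-up task has finished.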
+type TCheckWarmUpCacheAsyncResponse struct {
+ Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"`
+ TaskDone map[int64]bool `thrift:"task_done,2,optional" frugal:"2,optional,map" json:"task_done,omitempty"`
+}
+
+func NewTCheckWarmUpCacheAsyncResponse() *TCheckWarmUpCacheAsyncResponse {
+ return &TCheckWarmUpCacheAsyncResponse{}
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) InitDefault() {
+}
+
+var TCheckWarmUpCacheAsyncResponse_Status_DEFAULT *status.TStatus
+
+func (p *TCheckWarmUpCacheAsyncResponse) GetStatus() (v *status.TStatus) {
+ if !p.IsSetStatus() {
+ return TCheckWarmUpCacheAsyncResponse_Status_DEFAULT
}
- return _result.GetSuccess(), nil
+ return p.Status
}
-func (p *BackendServiceClient) GetStreamLoadRecord(ctx context.Context, lastStreamRecordTime int64) (r *TStreamLoadRecordResult_, err error) {
+
+var TCheckWarmUpCacheAsyncResponse_TaskDone_DEFAULT map[int64]bool
+
+func (p *TCheckWarmUpCacheAsyncResponse) GetTaskDone() (v map[int64]bool) {
+ if !p.IsSetTaskDone() {
+ return TCheckWarmUpCacheAsyncResponse_TaskDone_DEFAULT
+ }
+ return p.TaskDone
+}
+func (p *TCheckWarmUpCacheAsyncResponse) SetStatus(val *status.TStatus) {
+ p.Status = val
+}
+func (p *TCheckWarmUpCacheAsyncResponse) SetTaskDone(val map[int64]bool) {
+ p.TaskDone = val
+}
+
+var fieldIDToName_TCheckWarmUpCacheAsyncResponse = map[int16]string{
+ 1: "status",
+ 2: "task_done",
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) IsSetStatus() bool {
+ return p.Status != nil
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) IsSetTaskDone() bool {
+ return p.TaskDone != nil
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetStatus bool = false
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetStatus = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.MAP {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetStatus {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckWarmUpCacheAsyncResponse[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCheckWarmUpCacheAsyncResponse[fieldId]))
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) ReadField1(iprot thrift.TProtocol) error {
+ _field := status.NewTStatus()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Status = _field
+ return nil
+}
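+// ReadField2 decodes the optional task_done map (tablet id -> done flag).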
+func (p *TCheckWarmUpCacheAsyncResponse) ReadField2(iprot thrift.TProtocol) error {
+ _, _, size, err := iprot.ReadMapBegin()
+ if err != nil {
+ return err
+ }
+ _field := make(map[int64]bool, size)
+ for i := 0; i < size; i++ {
+ var _key int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _key = v
+ }
+
+ var _val bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _val = v
+ }
+
+ _field[_key] = _val
+ }
+ if err := iprot.ReadMapEnd(); err != nil {
+ return err
+ }
+ p.TaskDone = _field
+ return nil
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TCheckWarmUpCacheAsyncResponse"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Status.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTaskDone() {
+ if err = oprot.WriteFieldBegin("task_done", thrift.MAP, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteMapBegin(thrift.I64, thrift.BOOL, len(p.TaskDone)); err != nil {
+ return err
+ }
+ for k, v := range p.TaskDone {
+ if err := oprot.WriteI64(k); err != nil {
+ return err
+ }
+ if err := oprot.WriteBool(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteMapEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TCheckWarmUpCacheAsyncResponse(%+v)", *p)
+
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) DeepEqual(ano *TCheckWarmUpCacheAsyncResponse) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Status) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.TaskDone) {
+ return false
+ }
+ return true
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) Field1DeepEqual(src *status.TStatus) bool {
+
+ if !p.Status.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+func (p *TCheckWarmUpCacheAsyncResponse) Field2DeepEqual(src map[int64]bool) bool {
+
+ if len(p.TaskDone) != len(src) {
+ return false
+ }
+ for k, v := range p.TaskDone {
+ _src := src[k]
+ if v != _src {
+ return false
+ }
+ }
+ return true
+}
+
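+// TSyncLoadForTabletsRequest carries the required list of tablet ids whose
+// data should be loaded synchronously (purpose inferred from the type name).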
+type TSyncLoadForTabletsRequest struct {
+ TabletIds []int64 `thrift:"tablet_ids,1,required" frugal:"1,required,list" json:"tablet_ids"`
+}
+
+func NewTSyncLoadForTabletsRequest() *TSyncLoadForTabletsRequest {
+ return &TSyncLoadForTabletsRequest{}
+}
+
+func (p *TSyncLoadForTabletsRequest) InitDefault() {
+}
+
+func (p *TSyncLoadForTabletsRequest) GetTabletIds() (v []int64) {
+ return p.TabletIds
+}
+func (p *TSyncLoadForTabletsRequest) SetTabletIds(val []int64) {
+ p.TabletIds = val
+}
+
+var fieldIDToName_TSyncLoadForTabletsRequest = map[int16]string{
+ 1: "tablet_ids",
+}
+
+func (p *TSyncLoadForTabletsRequest) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetTabletIds bool = false
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetTabletIds = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetTabletIds {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSyncLoadForTabletsRequest[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSyncLoadForTabletsRequest[fieldId]))
+}
+
+func (p *TSyncLoadForTabletsRequest) ReadField1(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+
+ var _elem int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.TabletIds = _field
+ return nil
+}
+
+func (p *TSyncLoadForTabletsRequest) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TSyncLoadForTabletsRequest"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TSyncLoadForTabletsRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("tablet_ids", thrift.LIST, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.I64, len(p.TabletIds)); err != nil {
+ return err
+ }
+ for _, v := range p.TabletIds {
+ if err := oprot.WriteI64(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TSyncLoadForTabletsRequest) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TSyncLoadForTabletsRequest(%+v)", *p)
+
+}
+
+func (p *TSyncLoadForTabletsRequest) DeepEqual(ano *TSyncLoadForTabletsRequest) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.TabletIds) {
+ return false
+ }
+ return true
+}
+
+func (p *TSyncLoadForTabletsRequest) Field1DeepEqual(src []int64) bool {
+
+ if len(p.TabletIds) != len(src) {
+ return false
+ }
+ for i, v := range p.TabletIds {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
+ }
+ return true
+}
+
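+// TSyncLoadForTabletsResponse is the field-less acknowledgement for the
+// sync-load request above.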
+type TSyncLoadForTabletsResponse struct {
+}
+
+func NewTSyncLoadForTabletsResponse() *TSyncLoadForTabletsResponse {
+ return &TSyncLoadForTabletsResponse{}
+}
+
+func (p *TSyncLoadForTabletsResponse) InitDefault() {
+}
+
+var fieldIDToName_TSyncLoadForTabletsResponse = map[int16]string{}
+
+func (p *TSyncLoadForTabletsResponse) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldTypeError
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+SkipFieldTypeError:
+ return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TSyncLoadForTabletsResponse) Write(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteStructBegin("TSyncLoadForTabletsResponse"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TSyncLoadForTabletsResponse) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TSyncLoadForTabletsResponse(%+v)", *p)
+
+}
+
+func (p *TSyncLoadForTabletsResponse) DeepEqual(ano *TSyncLoadForTabletsResponse) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ return true
+}
+
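+// THotPartition describes one hot partition: the required partition id and
+// last access time, plus optional per-day and per-week query counters.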
+type THotPartition struct {
+ PartitionId int64 `thrift:"partition_id,1,required" frugal:"1,required,i64" json:"partition_id"`
+ LastAccessTime int64 `thrift:"last_access_time,2,required" frugal:"2,required,i64" json:"last_access_time"`
+ QueryPerDay *int64 `thrift:"query_per_day,3,optional" frugal:"3,optional,i64" json:"query_per_day,omitempty"`
+ QueryPerWeek *int64 `thrift:"query_per_week,4,optional" frugal:"4,optional,i64" json:"query_per_week,omitempty"`
+}
+
+func NewTHotPartition() *THotPartition {
+ return &THotPartition{}
+}
+
+func (p *THotPartition) InitDefault() {
+}
+
+func (p *THotPartition) GetPartitionId() (v int64) {
+ return p.PartitionId
+}
+
+func (p *THotPartition) GetLastAccessTime() (v int64) {
+ return p.LastAccessTime
+}
+
+var THotPartition_QueryPerDay_DEFAULT int64
+
+func (p *THotPartition) GetQueryPerDay() (v int64) {
+ if !p.IsSetQueryPerDay() {
+ return THotPartition_QueryPerDay_DEFAULT
+ }
+ return *p.QueryPerDay
+}
+
+var THotPartition_QueryPerWeek_DEFAULT int64
+
+func (p *THotPartition) GetQueryPerWeek() (v int64) {
+ if !p.IsSetQueryPerWeek() {
+ return THotPartition_QueryPerWeek_DEFAULT
+ }
+ return *p.QueryPerWeek
+}
+func (p *THotPartition) SetPartitionId(val int64) {
+ p.PartitionId = val
+}
+func (p *THotPartition) SetLastAccessTime(val int64) {
+ p.LastAccessTime = val
+}
+func (p *THotPartition) SetQueryPerDay(val *int64) {
+ p.QueryPerDay = val
+}
+func (p *THotPartition) SetQueryPerWeek(val *int64) {
+ p.QueryPerWeek = val
+}
+
+var fieldIDToName_THotPartition = map[int16]string{
+ 1: "partition_id",
+ 2: "last_access_time",
+ 3: "query_per_day",
+ 4: "query_per_week",
+}
+
+func (p *THotPartition) IsSetQueryPerDay() bool {
+ return p.QueryPerDay != nil
+}
+
+func (p *THotPartition) IsSetQueryPerWeek() bool {
+ return p.QueryPerWeek != nil
+}
+
+func (p *THotPartition) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetPartitionId bool = false
+ var issetLastAccessTime bool = false
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetPartitionId = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetLastAccessTime = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetPartitionId {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetLastAccessTime {
+ fieldId = 2
+ goto RequiredFieldNotSetError
+ }
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THotPartition[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_THotPartition[fieldId]))
+}
+
+func (p *THotPartition) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.PartitionId = _field
+ return nil
+}
+func (p *THotPartition) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.LastAccessTime = _field
+ return nil
+}
+func (p *THotPartition) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.QueryPerDay = _field
+ return nil
+}
+func (p *THotPartition) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.QueryPerWeek = _field
+ return nil
+}
+
+func (p *THotPartition) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("THotPartition"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *THotPartition) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.PartitionId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *THotPartition) writeField2(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("last_access_time", thrift.I64, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.LastAccessTime); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *THotPartition) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetQueryPerDay() {
+ if err = oprot.WriteFieldBegin("query_per_day", thrift.I64, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.QueryPerDay); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *THotPartition) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetQueryPerWeek() {
+ if err = oprot.WriteFieldBegin("query_per_week", thrift.I64, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.QueryPerWeek); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *THotPartition) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("THotPartition(%+v)", *p)
+
+}
+
+func (p *THotPartition) DeepEqual(ano *THotPartition) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.PartitionId) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.LastAccessTime) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.QueryPerDay) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.QueryPerWeek) {
+ return false
+ }
+ return true
+}
+
+func (p *THotPartition) Field1DeepEqual(src int64) bool {
+
+ if p.PartitionId != src {
+ return false
+ }
+ return true
+}
+func (p *THotPartition) Field2DeepEqual(src int64) bool {
+
+ if p.LastAccessTime != src {
+ return false
+ }
+ return true
+}
+func (p *THotPartition) Field3DeepEqual(src *int64) bool {
+
+ if p.QueryPerDay == src {
+ return true
+ } else if p.QueryPerDay == nil || src == nil {
+ return false
+ }
+ if *p.QueryPerDay != *src {
+ return false
+ }
+ return true
+}
+func (p *THotPartition) Field4DeepEqual(src *int64) bool {
+
+ if p.QueryPerWeek == src {
+ return true
+ } else if p.QueryPerWeek == nil || src == nil {
+ return false
+ }
+ if *p.QueryPerWeek != *src {
+ return false
+ }
+ return true
+}
+
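+// THotTableMessage groups the hot partitions reported for a table/index pair;
+// table_id and index_id are required, hot_partitions is optional.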
+type THotTableMessage struct {
+ TableId int64 `thrift:"table_id,1,required" frugal:"1,required,i64" json:"table_id"`
+ IndexId int64 `thrift:"index_id,2,required" frugal:"2,required,i64" json:"index_id"`
+ HotPartitions []*THotPartition `thrift:"hot_partitions,3,optional" frugal:"3,optional,list" json:"hot_partitions,omitempty"`
+}
+
+func NewTHotTableMessage() *THotTableMessage {
+ return &THotTableMessage{}
+}
+
+func (p *THotTableMessage) InitDefault() {
+}
+
+func (p *THotTableMessage) GetTableId() (v int64) {
+ return p.TableId
+}
+
+func (p *THotTableMessage) GetIndexId() (v int64) {
+ return p.IndexId
+}
+
+var THotTableMessage_HotPartitions_DEFAULT []*THotPartition
+
+func (p *THotTableMessage) GetHotPartitions() (v []*THotPartition) {
+ if !p.IsSetHotPartitions() {
+ return THotTableMessage_HotPartitions_DEFAULT
+ }
+ return p.HotPartitions
+}
+func (p *THotTableMessage) SetTableId(val int64) {
+ p.TableId = val
+}
+func (p *THotTableMessage) SetIndexId(val int64) {
+ p.IndexId = val
+}
+func (p *THotTableMessage) SetHotPartitions(val []*THotPartition) {
+ p.HotPartitions = val
+}
+
+var fieldIDToName_THotTableMessage = map[int16]string{
+ 1: "table_id",
+ 2: "index_id",
+ 3: "hot_partitions",
+}
+
+func (p *THotTableMessage) IsSetHotPartitions() bool {
+ return p.HotPartitions != nil
+}
+
+func (p *THotTableMessage) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetTableId bool = false
+ var issetIndexId bool = false
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetTableId = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetIndexId = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetTableId {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetIndexId {
+ fieldId = 2
+ goto RequiredFieldNotSetError
+ }
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THotTableMessage[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_THotTableMessage[fieldId]))
+}
+
+func (p *THotTableMessage) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.TableId = _field
+ return nil
+}
+func (p *THotTableMessage) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.IndexId = _field
+ return nil
+}
+func (p *THotTableMessage) ReadField3(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]*THotPartition, 0, size)
+ values := make([]THotPartition, size)
+ for i := 0; i < size; i++ {
+ _elem := &values[i]
+ _elem.InitDefault()
+
+ if err := _elem.Read(iprot); err != nil {
+ return err
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.HotPartitions = _field
+ return nil
+}
+
+func (p *THotTableMessage) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("THotTableMessage"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *THotTableMessage) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("table_id", thrift.I64, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.TableId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *THotTableMessage) writeField2(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("index_id", thrift.I64, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.IndexId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *THotTableMessage) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetHotPartitions() {
+ if err = oprot.WriteFieldBegin("hot_partitions", thrift.LIST, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.HotPartitions)); err != nil {
+ return err
+ }
+ for _, v := range p.HotPartitions {
+ if err := v.Write(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *THotTableMessage) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("THotTableMessage(%+v)", *p)
+
+}
+
+func (p *THotTableMessage) DeepEqual(ano *THotTableMessage) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.TableId) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.IndexId) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.HotPartitions) {
+ return false
+ }
+ return true
+}
+
+func (p *THotTableMessage) Field1DeepEqual(src int64) bool {
+
+ if p.TableId != src {
+ return false
+ }
+ return true
+}
+func (p *THotTableMessage) Field2DeepEqual(src int64) bool {
+
+ if p.IndexId != src {
+ return false
+ }
+ return true
+}
+func (p *THotTableMessage) Field3DeepEqual(src []*THotPartition) bool {
+
+ if len(p.HotPartitions) != len(src) {
+ return false
+ }
+ for i, v := range p.HotPartitions {
+ _src := src[i]
+ if !v.DeepEqual(_src) {
+ return false
+ }
+ }
+ return true
+}
+
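+// TGetTopNHotPartitionsRequest has no fields; it simply asks the backend for
+// its top-N hot partitions.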
+type TGetTopNHotPartitionsRequest struct {
+}
+
+func NewTGetTopNHotPartitionsRequest() *TGetTopNHotPartitionsRequest {
+ return &TGetTopNHotPartitionsRequest{}
+}
+
+func (p *TGetTopNHotPartitionsRequest) InitDefault() {
+}
+
+var fieldIDToName_TGetTopNHotPartitionsRequest = map[int16]string{}
+
+func (p *TGetTopNHotPartitionsRequest) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldTypeError
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+SkipFieldTypeError:
+ return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TGetTopNHotPartitionsRequest) Write(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteStructBegin("TGetTopNHotPartitionsRequest"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TGetTopNHotPartitionsRequest) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TGetTopNHotPartitionsRequest(%+v)", *p)
+
+}
+
+func (p *TGetTopNHotPartitionsRequest) DeepEqual(ano *TGetTopNHotPartitionsRequest) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ return true
+}
+
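+// TGetTopNHotPartitionsResponse returns the required file cache size together
+// with an optional list of hot tables and their hot partitions.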
+type TGetTopNHotPartitionsResponse struct {
+ FileCacheSize int64 `thrift:"file_cache_size,1,required" frugal:"1,required,i64" json:"file_cache_size"`
+ HotTables []*THotTableMessage `thrift:"hot_tables,2,optional" frugal:"2,optional,list" json:"hot_tables,omitempty"`
+}
+
+func NewTGetTopNHotPartitionsResponse() *TGetTopNHotPartitionsResponse {
+ return &TGetTopNHotPartitionsResponse{}
+}
+
+func (p *TGetTopNHotPartitionsResponse) InitDefault() {
+}
+
+func (p *TGetTopNHotPartitionsResponse) GetFileCacheSize() (v int64) {
+ return p.FileCacheSize
+}
+
+var TGetTopNHotPartitionsResponse_HotTables_DEFAULT []*THotTableMessage
+
+func (p *TGetTopNHotPartitionsResponse) GetHotTables() (v []*THotTableMessage) {
+ if !p.IsSetHotTables() {
+ return TGetTopNHotPartitionsResponse_HotTables_DEFAULT
+ }
+ return p.HotTables
+}
+func (p *TGetTopNHotPartitionsResponse) SetFileCacheSize(val int64) {
+ p.FileCacheSize = val
+}
+func (p *TGetTopNHotPartitionsResponse) SetHotTables(val []*THotTableMessage) {
+ p.HotTables = val
+}
+
+var fieldIDToName_TGetTopNHotPartitionsResponse = map[int16]string{
+ 1: "file_cache_size",
+ 2: "hot_tables",
+}
+
+func (p *TGetTopNHotPartitionsResponse) IsSetHotTables() bool {
+ return p.HotTables != nil
+}
+
+func (p *TGetTopNHotPartitionsResponse) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetFileCacheSize bool = false
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetFileCacheSize = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetFileCacheSize {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetTopNHotPartitionsResponse[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TGetTopNHotPartitionsResponse[fieldId]))
+}
+
+func (p *TGetTopNHotPartitionsResponse) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.FileCacheSize = _field
+ return nil
+}
+func (p *TGetTopNHotPartitionsResponse) ReadField2(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]*THotTableMessage, 0, size)
+ values := make([]THotTableMessage, size)
+ for i := 0; i < size; i++ {
+ _elem := &values[i]
+ _elem.InitDefault()
+
+ if err := _elem.Read(iprot); err != nil {
+ return err
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.HotTables = _field
+ return nil
+}
+
+func (p *TGetTopNHotPartitionsResponse) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TGetTopNHotPartitionsResponse"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TGetTopNHotPartitionsResponse) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("file_cache_size", thrift.I64, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.FileCacheSize); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TGetTopNHotPartitionsResponse) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetHotTables() {
+ if err = oprot.WriteFieldBegin("hot_tables", thrift.LIST, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.HotTables)); err != nil {
+ return err
+ }
+ for _, v := range p.HotTables {
+ if err := v.Write(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TGetTopNHotPartitionsResponse) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TGetTopNHotPartitionsResponse(%+v)", *p)
+
+}
+
+func (p *TGetTopNHotPartitionsResponse) DeepEqual(ano *TGetTopNHotPartitionsResponse) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.FileCacheSize) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.HotTables) {
+ return false
+ }
+ return true
+}
+
+func (p *TGetTopNHotPartitionsResponse) Field1DeepEqual(src int64) bool {
+
+ if p.FileCacheSize != src {
+ return false
+ }
+ return true
+}
+func (p *TGetTopNHotPartitionsResponse) Field2DeepEqual(src []*THotTableMessage) bool {
+
+ if len(p.HotTables) != len(src) {
+ return false
+ }
+ for i, v := range p.HotTables {
+ _src := src[i]
+ if !v.DeepEqual(_src) {
+ return false
+ }
+ }
+ return true
+}
+
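+// TJobMeta describes a download job: the required download type plus the
+// optional be_ip/brpc_port of the serving BE and the tablet ids involved
+// (roles inferred from the field names).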
+type TJobMeta struct {
+ DownloadType TDownloadType `thrift:"download_type,1,required" frugal:"1,required,TDownloadType" json:"download_type"`
+ BeIp *string `thrift:"be_ip,2,optional" frugal:"2,optional,string" json:"be_ip,omitempty"`
+ BrpcPort *int32 `thrift:"brpc_port,3,optional" frugal:"3,optional,i32" json:"brpc_port,omitempty"`
+ TabletIds []int64 `thrift:"tablet_ids,4,optional" frugal:"4,optional,list" json:"tablet_ids,omitempty"`
+}
+
+func NewTJobMeta() *TJobMeta {
+ return &TJobMeta{}
+}
+
+func (p *TJobMeta) InitDefault() {
+}
+
+func (p *TJobMeta) GetDownloadType() (v TDownloadType) {
+ return p.DownloadType
+}
+
+var TJobMeta_BeIp_DEFAULT string
+
+func (p *TJobMeta) GetBeIp() (v string) {
+ if !p.IsSetBeIp() {
+ return TJobMeta_BeIp_DEFAULT
+ }
+ return *p.BeIp
+}
+
+var TJobMeta_BrpcPort_DEFAULT int32
+
+func (p *TJobMeta) GetBrpcPort() (v int32) {
+ if !p.IsSetBrpcPort() {
+ return TJobMeta_BrpcPort_DEFAULT
+ }
+ return *p.BrpcPort
+}
+
+var TJobMeta_TabletIds_DEFAULT []int64
+
+func (p *TJobMeta) GetTabletIds() (v []int64) {
+ if !p.IsSetTabletIds() {
+ return TJobMeta_TabletIds_DEFAULT
+ }
+ return p.TabletIds
+}
+func (p *TJobMeta) SetDownloadType(val TDownloadType) {
+ p.DownloadType = val
+}
+func (p *TJobMeta) SetBeIp(val *string) {
+ p.BeIp = val
+}
+func (p *TJobMeta) SetBrpcPort(val *int32) {
+ p.BrpcPort = val
+}
+func (p *TJobMeta) SetTabletIds(val []int64) {
+ p.TabletIds = val
+}
+
+var fieldIDToName_TJobMeta = map[int16]string{
+ 1: "download_type",
+ 2: "be_ip",
+ 3: "brpc_port",
+ 4: "tablet_ids",
+}
+
+func (p *TJobMeta) IsSetBeIp() bool {
+ return p.BeIp != nil
+}
+
+func (p *TJobMeta) IsSetBrpcPort() bool {
+ return p.BrpcPort != nil
+}
+
+func (p *TJobMeta) IsSetTabletIds() bool {
+ return p.TabletIds != nil
+}
+
+func (p *TJobMeta) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetDownloadType bool = false
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetDownloadType = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetDownloadType {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TJobMeta[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TJobMeta[fieldId]))
+}
+
+func (p *TJobMeta) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field TDownloadType
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = TDownloadType(v)
+ }
+ p.DownloadType = _field
+ return nil
+}
+func (p *TJobMeta) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.BeIp = _field
+ return nil
+}
+func (p *TJobMeta) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.BrpcPort = _field
+ return nil
+}
+func (p *TJobMeta) ReadField4(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+
+ var _elem int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.TabletIds = _field
+ return nil
+}
+
+func (p *TJobMeta) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TJobMeta"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TJobMeta) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("download_type", thrift.I32, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(p.DownloadType)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TJobMeta) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetBeIp() {
+ if err = oprot.WriteFieldBegin("be_ip", thrift.STRING, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.BeIp); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TJobMeta) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetBrpcPort() {
+ if err = oprot.WriteFieldBegin("brpc_port", thrift.I32, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.BrpcPort); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *TJobMeta) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTabletIds() {
+ if err = oprot.WriteFieldBegin("tablet_ids", thrift.LIST, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.I64, len(p.TabletIds)); err != nil {
+ return err
+ }
+ for _, v := range p.TabletIds {
+ if err := oprot.WriteI64(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *TJobMeta) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TJobMeta(%+v)", *p)
+
+}
+
+func (p *TJobMeta) DeepEqual(ano *TJobMeta) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.DownloadType) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.BeIp) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.BrpcPort) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.TabletIds) {
+ return false
+ }
+ return true
+}
+
+func (p *TJobMeta) Field1DeepEqual(src TDownloadType) bool {
+
+ if p.DownloadType != src {
+ return false
+ }
+ return true
+}
+func (p *TJobMeta) Field2DeepEqual(src *string) bool {
+
+ if p.BeIp == src {
+ return true
+ } else if p.BeIp == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.BeIp, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *TJobMeta) Field3DeepEqual(src *int32) bool {
+
+ if p.BrpcPort == src {
+ return true
+ } else if p.BrpcPort == nil || src == nil {
+ return false
+ }
+ if *p.BrpcPort != *src {
+ return false
+ }
+ return true
+}
+func (p *TJobMeta) Field4DeepEqual(src []int64) bool {
+
+ if len(p.TabletIds) != len(src) {
+ return false
+ }
+ for i, v := range p.TabletIds {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
+ }
+ return true
+}
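+
+// Minimal usage sketch for TJobMeta (illustrative only; oprot is assumed to be
+// an open thrift.TProtocol, and the TDownloadType value is left to the caller):
+//
+//	meta := NewTJobMeta()
+//	meta.TabletIds = []int64{1001, 1002} // optional list field 4
+//	// download_type (field 1) is required on read; be_ip and brpc_port are
+//	// optional pointer fields and are skipped by writeField2/3 when nil.
+//	if err := meta.Write(oprot); err != nil {
+//		// handle encode error
+//	}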
+
+type TWarmUpTabletsRequest struct {
+ JobId int64 `thrift:"job_id,1,required" frugal:"1,required,i64" json:"job_id"`
+ BatchId int64 `thrift:"batch_id,2,required" frugal:"2,required,i64" json:"batch_id"`
+ JobMetas []*TJobMeta `thrift:"job_metas,3,optional" frugal:"3,optional,list" json:"job_metas,omitempty"`
+ Type TWarmUpTabletsRequestType `thrift:"type,4,required" frugal:"4,required,TWarmUpTabletsRequestType" json:"type"`
+}
+
+func NewTWarmUpTabletsRequest() *TWarmUpTabletsRequest {
+ return &TWarmUpTabletsRequest{}
+}
+
+func (p *TWarmUpTabletsRequest) InitDefault() {
+}
+
+func (p *TWarmUpTabletsRequest) GetJobId() (v int64) {
+ return p.JobId
+}
+
+func (p *TWarmUpTabletsRequest) GetBatchId() (v int64) {
+ return p.BatchId
+}
+
+var TWarmUpTabletsRequest_JobMetas_DEFAULT []*TJobMeta
+
+func (p *TWarmUpTabletsRequest) GetJobMetas() (v []*TJobMeta) {
+ if !p.IsSetJobMetas() {
+ return TWarmUpTabletsRequest_JobMetas_DEFAULT
+ }
+ return p.JobMetas
+}
+
+func (p *TWarmUpTabletsRequest) GetType() (v TWarmUpTabletsRequestType) {
+ return p.Type
+}
+func (p *TWarmUpTabletsRequest) SetJobId(val int64) {
+ p.JobId = val
+}
+func (p *TWarmUpTabletsRequest) SetBatchId(val int64) {
+ p.BatchId = val
+}
+func (p *TWarmUpTabletsRequest) SetJobMetas(val []*TJobMeta) {
+ p.JobMetas = val
+}
+func (p *TWarmUpTabletsRequest) SetType(val TWarmUpTabletsRequestType) {
+ p.Type = val
+}
+
+var fieldIDToName_TWarmUpTabletsRequest = map[int16]string{
+ 1: "job_id",
+ 2: "batch_id",
+ 3: "job_metas",
+ 4: "type",
+}
+
+func (p *TWarmUpTabletsRequest) IsSetJobMetas() bool {
+ return p.JobMetas != nil
+}
+
+func (p *TWarmUpTabletsRequest) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetJobId bool = false
+ var issetBatchId bool = false
+ var issetType bool = false
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetJobId = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetBatchId = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetType = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetJobId {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetBatchId {
+ fieldId = 2
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetType {
+ fieldId = 4
+ goto RequiredFieldNotSetError
+ }
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpTabletsRequest[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpTabletsRequest[fieldId]))
+}
+
+func (p *TWarmUpTabletsRequest) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.JobId = _field
+ return nil
+}
+func (p *TWarmUpTabletsRequest) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = v
+ }
+ p.BatchId = _field
+ return nil
+}
+func (p *TWarmUpTabletsRequest) ReadField3(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]*TJobMeta, 0, size)
+ values := make([]TJobMeta, size)
+ for i := 0; i < size; i++ {
+ _elem := &values[i]
+ _elem.InitDefault()
+
+ if err := _elem.Read(iprot); err != nil {
+ return err
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.JobMetas = _field
+ return nil
+}
+func (p *TWarmUpTabletsRequest) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field TWarmUpTabletsRequestType
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = TWarmUpTabletsRequestType(v)
+ }
+ p.Type = _field
+ return nil
+}
+
+func (p *TWarmUpTabletsRequest) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TWarmUpTabletsRequest"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TWarmUpTabletsRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("job_id", thrift.I64, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.JobId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TWarmUpTabletsRequest) writeField2(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("batch_id", thrift.I64, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(p.BatchId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TWarmUpTabletsRequest) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetJobMetas() {
+ if err = oprot.WriteFieldBegin("job_metas", thrift.LIST, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.JobMetas)); err != nil {
+ return err
+ }
+ for _, v := range p.JobMetas {
+ if err := v.Write(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *TWarmUpTabletsRequest) writeField4(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("type", thrift.I32, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(p.Type)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *TWarmUpTabletsRequest) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TWarmUpTabletsRequest(%+v)", *p)
+
+}
+
+func (p *TWarmUpTabletsRequest) DeepEqual(ano *TWarmUpTabletsRequest) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.JobId) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.BatchId) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.JobMetas) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.Type) {
+ return false
+ }
+ return true
+}
+
+func (p *TWarmUpTabletsRequest) Field1DeepEqual(src int64) bool {
+
+ if p.JobId != src {
+ return false
+ }
+ return true
+}
+func (p *TWarmUpTabletsRequest) Field2DeepEqual(src int64) bool {
+
+ if p.BatchId != src {
+ return false
+ }
+ return true
+}
+func (p *TWarmUpTabletsRequest) Field3DeepEqual(src []*TJobMeta) bool {
+
+ if len(p.JobMetas) != len(src) {
+ return false
+ }
+ for i, v := range p.JobMetas {
+ _src := src[i]
+ if !v.DeepEqual(_src) {
+ return false
+ }
+ }
+ return true
+}
+func (p *TWarmUpTabletsRequest) Field4DeepEqual(src TWarmUpTabletsRequestType) bool {
+
+ if p.Type != src {
+ return false
+ }
+ return true
+}
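+
+// Minimal encoding sketch for TWarmUpTabletsRequest (illustrative only; oprot
+// and reqType are assumed to be a thrift.TProtocol and a
+// TWarmUpTabletsRequestType chosen by the caller):
+//
+//	req := NewTWarmUpTabletsRequest()
+//	req.SetJobId(1)
+//	req.SetBatchId(1)
+//	req.SetType(reqType)                        // field 4 is required
+//	req.SetJobMetas([]*TJobMeta{NewTJobMeta()}) // optional; omitted from the wire when unset
+//	if err := req.Write(oprot); err != nil {
+//		// handle encode error
+//	}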
+
+type TWarmUpTabletsResponse struct {
+ Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"`
+ JobId *int64 `thrift:"job_id,2,optional" frugal:"2,optional,i64" json:"job_id,omitempty"`
+ BatchId *int64 `thrift:"batch_id,3,optional" frugal:"3,optional,i64" json:"batch_id,omitempty"`
+ PendingJobSize *int64 `thrift:"pending_job_size,4,optional" frugal:"4,optional,i64" json:"pending_job_size,omitempty"`
+ FinishJobSize *int64 `thrift:"finish_job_size,5,optional" frugal:"5,optional,i64" json:"finish_job_size,omitempty"`
+}
+
+func NewTWarmUpTabletsResponse() *TWarmUpTabletsResponse {
+ return &TWarmUpTabletsResponse{}
+}
+
+func (p *TWarmUpTabletsResponse) InitDefault() {
+}
+
+var TWarmUpTabletsResponse_Status_DEFAULT *status.TStatus
+
+func (p *TWarmUpTabletsResponse) GetStatus() (v *status.TStatus) {
+ if !p.IsSetStatus() {
+ return TWarmUpTabletsResponse_Status_DEFAULT
+ }
+ return p.Status
+}
+
+var TWarmUpTabletsResponse_JobId_DEFAULT int64
+
+func (p *TWarmUpTabletsResponse) GetJobId() (v int64) {
+ if !p.IsSetJobId() {
+ return TWarmUpTabletsResponse_JobId_DEFAULT
+ }
+ return *p.JobId
+}
+
+var TWarmUpTabletsResponse_BatchId_DEFAULT int64
+
+func (p *TWarmUpTabletsResponse) GetBatchId() (v int64) {
+ if !p.IsSetBatchId() {
+ return TWarmUpTabletsResponse_BatchId_DEFAULT
+ }
+ return *p.BatchId
+}
+
+var TWarmUpTabletsResponse_PendingJobSize_DEFAULT int64
+
+func (p *TWarmUpTabletsResponse) GetPendingJobSize() (v int64) {
+ if !p.IsSetPendingJobSize() {
+ return TWarmUpTabletsResponse_PendingJobSize_DEFAULT
+ }
+ return *p.PendingJobSize
+}
+
+var TWarmUpTabletsResponse_FinishJobSize_DEFAULT int64
+
+func (p *TWarmUpTabletsResponse) GetFinishJobSize() (v int64) {
+ if !p.IsSetFinishJobSize() {
+ return TWarmUpTabletsResponse_FinishJobSize_DEFAULT
+ }
+ return *p.FinishJobSize
+}
+func (p *TWarmUpTabletsResponse) SetStatus(val *status.TStatus) {
+ p.Status = val
+}
+func (p *TWarmUpTabletsResponse) SetJobId(val *int64) {
+ p.JobId = val
+}
+func (p *TWarmUpTabletsResponse) SetBatchId(val *int64) {
+ p.BatchId = val
+}
+func (p *TWarmUpTabletsResponse) SetPendingJobSize(val *int64) {
+ p.PendingJobSize = val
+}
+func (p *TWarmUpTabletsResponse) SetFinishJobSize(val *int64) {
+ p.FinishJobSize = val
+}
+
+var fieldIDToName_TWarmUpTabletsResponse = map[int16]string{
+ 1: "status",
+ 2: "job_id",
+ 3: "batch_id",
+ 4: "pending_job_size",
+ 5: "finish_job_size",
+}
+
+func (p *TWarmUpTabletsResponse) IsSetStatus() bool {
+ return p.Status != nil
+}
+
+func (p *TWarmUpTabletsResponse) IsSetJobId() bool {
+ return p.JobId != nil
+}
+
+func (p *TWarmUpTabletsResponse) IsSetBatchId() bool {
+ return p.BatchId != nil
+}
+
+func (p *TWarmUpTabletsResponse) IsSetPendingJobSize() bool {
+ return p.PendingJobSize != nil
+}
+
+func (p *TWarmUpTabletsResponse) IsSetFinishJobSize() bool {
+ return p.FinishJobSize != nil
+}
+
+func (p *TWarmUpTabletsResponse) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetStatus bool = false
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetStatus = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 5:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField5(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetStatus {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpTabletsResponse[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpTabletsResponse[fieldId]))
+}
+
+func (p *TWarmUpTabletsResponse) ReadField1(iprot thrift.TProtocol) error {
+ _field := status.NewTStatus()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Status = _field
+ return nil
+}
+func (p *TWarmUpTabletsResponse) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.JobId = _field
+ return nil
+}
+func (p *TWarmUpTabletsResponse) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.BatchId = _field
+ return nil
+}
+func (p *TWarmUpTabletsResponse) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.PendingJobSize = _field
+ return nil
+}
+func (p *TWarmUpTabletsResponse) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.FinishJobSize = _field
+ return nil
+}
+
+func (p *TWarmUpTabletsResponse) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TWarmUpTabletsResponse"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ if err = p.writeField5(oprot); err != nil {
+ fieldId = 5
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TWarmUpTabletsResponse) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Status.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TWarmUpTabletsResponse) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetJobId() {
+ if err = oprot.WriteFieldBegin("job_id", thrift.I64, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.JobId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TWarmUpTabletsResponse) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetBatchId() {
+ if err = oprot.WriteFieldBegin("batch_id", thrift.I64, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.BatchId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *TWarmUpTabletsResponse) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetPendingJobSize() {
+ if err = oprot.WriteFieldBegin("pending_job_size", thrift.I64, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.PendingJobSize); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *TWarmUpTabletsResponse) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetFinishJobSize() {
+ if err = oprot.WriteFieldBegin("finish_job_size", thrift.I64, 5); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.FinishJobSize); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err)
+}
+
+func (p *TWarmUpTabletsResponse) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TWarmUpTabletsResponse(%+v)", *p)
+
+}
+
+func (p *TWarmUpTabletsResponse) DeepEqual(ano *TWarmUpTabletsResponse) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Status) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.JobId) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.BatchId) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.PendingJobSize) {
+ return false
+ }
+ if !p.Field5DeepEqual(ano.FinishJobSize) {
+ return false
+ }
+ return true
+}
+
+func (p *TWarmUpTabletsResponse) Field1DeepEqual(src *status.TStatus) bool {
+
+ if !p.Status.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+func (p *TWarmUpTabletsResponse) Field2DeepEqual(src *int64) bool {
+
+ if p.JobId == src {
+ return true
+ } else if p.JobId == nil || src == nil {
+ return false
+ }
+ if *p.JobId != *src {
+ return false
+ }
+ return true
+}
+func (p *TWarmUpTabletsResponse) Field3DeepEqual(src *int64) bool {
+
+ if p.BatchId == src {
+ return true
+ } else if p.BatchId == nil || src == nil {
+ return false
+ }
+ if *p.BatchId != *src {
+ return false
+ }
+ return true
+}
+func (p *TWarmUpTabletsResponse) Field4DeepEqual(src *int64) bool {
+
+ if p.PendingJobSize == src {
+ return true
+ } else if p.PendingJobSize == nil || src == nil {
+ return false
+ }
+ if *p.PendingJobSize != *src {
+ return false
+ }
+ return true
+}
+func (p *TWarmUpTabletsResponse) Field5DeepEqual(src *int64) bool {
+
+ if p.FinishJobSize == src {
+ return true
+ } else if p.FinishJobSize == nil || src == nil {
+ return false
+ }
+ if *p.FinishJobSize != *src {
+ return false
+ }
+ return true
+}
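+
+// Minimal decoding sketch for TWarmUpTabletsResponse (illustrative only; iprot
+// is assumed to be an open thrift.TProtocol positioned at the struct):
+//
+//	resp := NewTWarmUpTabletsResponse()
+//	if err := resp.Read(iprot); err != nil {
+//		// handle decode error; Read also fails when required field 1 (status) is missing
+//	}
+//	pending := resp.GetPendingJobSize()  // falls back to the *_DEFAULT value when unset
+//	finished := resp.GetFinishJobSize()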
+
+type TIngestBinlogRequest struct {
+ TxnId *int64 `thrift:"txn_id,1,optional" frugal:"1,optional,i64" json:"txn_id,omitempty"`
+ RemoteTabletId *int64 `thrift:"remote_tablet_id,2,optional" frugal:"2,optional,i64" json:"remote_tablet_id,omitempty"`
+ BinlogVersion *int64 `thrift:"binlog_version,3,optional" frugal:"3,optional,i64" json:"binlog_version,omitempty"`
+ RemoteHost *string `thrift:"remote_host,4,optional" frugal:"4,optional,string" json:"remote_host,omitempty"`
+ RemotePort *string `thrift:"remote_port,5,optional" frugal:"5,optional,string" json:"remote_port,omitempty"`
+ PartitionId *int64 `thrift:"partition_id,6,optional" frugal:"6,optional,i64" json:"partition_id,omitempty"`
+ LocalTabletId *int64 `thrift:"local_tablet_id,7,optional" frugal:"7,optional,i64" json:"local_tablet_id,omitempty"`
+ LoadId *types.TUniqueId `thrift:"load_id,8,optional" frugal:"8,optional,types.TUniqueId" json:"load_id,omitempty"`
+}
+
+func NewTIngestBinlogRequest() *TIngestBinlogRequest {
+ return &TIngestBinlogRequest{}
+}
+
+func (p *TIngestBinlogRequest) InitDefault() {
+}
+
+var TIngestBinlogRequest_TxnId_DEFAULT int64
+
+func (p *TIngestBinlogRequest) GetTxnId() (v int64) {
+ if !p.IsSetTxnId() {
+ return TIngestBinlogRequest_TxnId_DEFAULT
+ }
+ return *p.TxnId
+}
+
+var TIngestBinlogRequest_RemoteTabletId_DEFAULT int64
+
+func (p *TIngestBinlogRequest) GetRemoteTabletId() (v int64) {
+ if !p.IsSetRemoteTabletId() {
+ return TIngestBinlogRequest_RemoteTabletId_DEFAULT
+ }
+ return *p.RemoteTabletId
+}
+
+var TIngestBinlogRequest_BinlogVersion_DEFAULT int64
+
+func (p *TIngestBinlogRequest) GetBinlogVersion() (v int64) {
+ if !p.IsSetBinlogVersion() {
+ return TIngestBinlogRequest_BinlogVersion_DEFAULT
+ }
+ return *p.BinlogVersion
+}
+
+var TIngestBinlogRequest_RemoteHost_DEFAULT string
+
+func (p *TIngestBinlogRequest) GetRemoteHost() (v string) {
+ if !p.IsSetRemoteHost() {
+ return TIngestBinlogRequest_RemoteHost_DEFAULT
+ }
+ return *p.RemoteHost
+}
+
+var TIngestBinlogRequest_RemotePort_DEFAULT string
+
+func (p *TIngestBinlogRequest) GetRemotePort() (v string) {
+ if !p.IsSetRemotePort() {
+ return TIngestBinlogRequest_RemotePort_DEFAULT
+ }
+ return *p.RemotePort
+}
+
+var TIngestBinlogRequest_PartitionId_DEFAULT int64
+
+func (p *TIngestBinlogRequest) GetPartitionId() (v int64) {
+ if !p.IsSetPartitionId() {
+ return TIngestBinlogRequest_PartitionId_DEFAULT
+ }
+ return *p.PartitionId
+}
+
+var TIngestBinlogRequest_LocalTabletId_DEFAULT int64
+
+func (p *TIngestBinlogRequest) GetLocalTabletId() (v int64) {
+ if !p.IsSetLocalTabletId() {
+ return TIngestBinlogRequest_LocalTabletId_DEFAULT
+ }
+ return *p.LocalTabletId
+}
+
+var TIngestBinlogRequest_LoadId_DEFAULT *types.TUniqueId
+
+func (p *TIngestBinlogRequest) GetLoadId() (v *types.TUniqueId) {
+ if !p.IsSetLoadId() {
+ return TIngestBinlogRequest_LoadId_DEFAULT
+ }
+ return p.LoadId
+}
+func (p *TIngestBinlogRequest) SetTxnId(val *int64) {
+ p.TxnId = val
+}
+func (p *TIngestBinlogRequest) SetRemoteTabletId(val *int64) {
+ p.RemoteTabletId = val
+}
+func (p *TIngestBinlogRequest) SetBinlogVersion(val *int64) {
+ p.BinlogVersion = val
+}
+func (p *TIngestBinlogRequest) SetRemoteHost(val *string) {
+ p.RemoteHost = val
+}
+func (p *TIngestBinlogRequest) SetRemotePort(val *string) {
+ p.RemotePort = val
+}
+func (p *TIngestBinlogRequest) SetPartitionId(val *int64) {
+ p.PartitionId = val
+}
+func (p *TIngestBinlogRequest) SetLocalTabletId(val *int64) {
+ p.LocalTabletId = val
+}
+func (p *TIngestBinlogRequest) SetLoadId(val *types.TUniqueId) {
+ p.LoadId = val
+}
+
+var fieldIDToName_TIngestBinlogRequest = map[int16]string{
+ 1: "txn_id",
+ 2: "remote_tablet_id",
+ 3: "binlog_version",
+ 4: "remote_host",
+ 5: "remote_port",
+ 6: "partition_id",
+ 7: "local_tablet_id",
+ 8: "load_id",
+}
+
+func (p *TIngestBinlogRequest) IsSetTxnId() bool {
+ return p.TxnId != nil
+}
+
+func (p *TIngestBinlogRequest) IsSetRemoteTabletId() bool {
+ return p.RemoteTabletId != nil
+}
+
+func (p *TIngestBinlogRequest) IsSetBinlogVersion() bool {
+ return p.BinlogVersion != nil
+}
+
+func (p *TIngestBinlogRequest) IsSetRemoteHost() bool {
+ return p.RemoteHost != nil
+}
+
+func (p *TIngestBinlogRequest) IsSetRemotePort() bool {
+ return p.RemotePort != nil
+}
+
+func (p *TIngestBinlogRequest) IsSetPartitionId() bool {
+ return p.PartitionId != nil
+}
+
+func (p *TIngestBinlogRequest) IsSetLocalTabletId() bool {
+ return p.LocalTabletId != nil
+}
+
+func (p *TIngestBinlogRequest) IsSetLoadId() bool {
+ return p.LoadId != nil
+}
+
+func (p *TIngestBinlogRequest) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 5:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField5(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 6:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField6(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 7:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField7(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 8:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField8(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogRequest[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TIngestBinlogRequest) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.TxnId = _field
+ return nil
+}
+func (p *TIngestBinlogRequest) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.RemoteTabletId = _field
+ return nil
+}
+func (p *TIngestBinlogRequest) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.BinlogVersion = _field
+ return nil
+}
+func (p *TIngestBinlogRequest) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.RemoteHost = _field
+ return nil
+}
+func (p *TIngestBinlogRequest) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.RemotePort = _field
+ return nil
+}
+func (p *TIngestBinlogRequest) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.PartitionId = _field
+ return nil
+}
+func (p *TIngestBinlogRequest) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.LocalTabletId = _field
+ return nil
+}
+func (p *TIngestBinlogRequest) ReadField8(iprot thrift.TProtocol) error {
+ _field := types.NewTUniqueId()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.LoadId = _field
+ return nil
+}
+
+func (p *TIngestBinlogRequest) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TIngestBinlogRequest"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ if err = p.writeField5(oprot); err != nil {
+ fieldId = 5
+ goto WriteFieldError
+ }
+ if err = p.writeField6(oprot); err != nil {
+ fieldId = 6
+ goto WriteFieldError
+ }
+ if err = p.writeField7(oprot); err != nil {
+ fieldId = 7
+ goto WriteFieldError
+ }
+ if err = p.writeField8(oprot); err != nil {
+ fieldId = 8
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TIngestBinlogRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTxnId() {
+ if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.TxnId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TIngestBinlogRequest) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetRemoteTabletId() {
+ if err = oprot.WriteFieldBegin("remote_tablet_id", thrift.I64, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.RemoteTabletId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TIngestBinlogRequest) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetBinlogVersion() {
+ if err = oprot.WriteFieldBegin("binlog_version", thrift.I64, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.BinlogVersion); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *TIngestBinlogRequest) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetRemoteHost() {
+ if err = oprot.WriteFieldBegin("remote_host", thrift.STRING, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.RemoteHost); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *TIngestBinlogRequest) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetRemotePort() {
+ if err = oprot.WriteFieldBegin("remote_port", thrift.STRING, 5); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.RemotePort); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err)
+}
+
+func (p *TIngestBinlogRequest) writeField6(oprot thrift.TProtocol) (err error) {
+ if p.IsSetPartitionId() {
+ if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 6); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.PartitionId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err)
+}
+
+func (p *TIngestBinlogRequest) writeField7(oprot thrift.TProtocol) (err error) {
+ if p.IsSetLocalTabletId() {
+ if err = oprot.WriteFieldBegin("local_tablet_id", thrift.I64, 7); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.LocalTabletId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err)
+}
+
+func (p *TIngestBinlogRequest) writeField8(oprot thrift.TProtocol) (err error) {
+ if p.IsSetLoadId() {
+ if err = oprot.WriteFieldBegin("load_id", thrift.STRUCT, 8); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.LoadId.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err)
+}
+
+func (p *TIngestBinlogRequest) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TIngestBinlogRequest(%+v)", *p)
+
+}
+
+func (p *TIngestBinlogRequest) DeepEqual(ano *TIngestBinlogRequest) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.TxnId) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.RemoteTabletId) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.BinlogVersion) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.RemoteHost) {
+ return false
+ }
+ if !p.Field5DeepEqual(ano.RemotePort) {
+ return false
+ }
+ if !p.Field6DeepEqual(ano.PartitionId) {
+ return false
+ }
+ if !p.Field7DeepEqual(ano.LocalTabletId) {
+ return false
+ }
+ if !p.Field8DeepEqual(ano.LoadId) {
+ return false
+ }
+ return true
+}
+
+func (p *TIngestBinlogRequest) Field1DeepEqual(src *int64) bool {
+
+ if p.TxnId == src {
+ return true
+ } else if p.TxnId == nil || src == nil {
+ return false
+ }
+ if *p.TxnId != *src {
+ return false
+ }
+ return true
+}
+func (p *TIngestBinlogRequest) Field2DeepEqual(src *int64) bool {
+
+ if p.RemoteTabletId == src {
+ return true
+ } else if p.RemoteTabletId == nil || src == nil {
+ return false
+ }
+ if *p.RemoteTabletId != *src {
+ return false
+ }
+ return true
+}
+func (p *TIngestBinlogRequest) Field3DeepEqual(src *int64) bool {
+
+ if p.BinlogVersion == src {
+ return true
+ } else if p.BinlogVersion == nil || src == nil {
+ return false
+ }
+ if *p.BinlogVersion != *src {
+ return false
+ }
+ return true
+}
+func (p *TIngestBinlogRequest) Field4DeepEqual(src *string) bool {
+
+ if p.RemoteHost == src {
+ return true
+ } else if p.RemoteHost == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.RemoteHost, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *TIngestBinlogRequest) Field5DeepEqual(src *string) bool {
+
+ if p.RemotePort == src {
+ return true
+ } else if p.RemotePort == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.RemotePort, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *TIngestBinlogRequest) Field6DeepEqual(src *int64) bool {
+
+ if p.PartitionId == src {
+ return true
+ } else if p.PartitionId == nil || src == nil {
+ return false
+ }
+ if *p.PartitionId != *src {
+ return false
+ }
+ return true
+}
+func (p *TIngestBinlogRequest) Field7DeepEqual(src *int64) bool {
+
+ if p.LocalTabletId == src {
+ return true
+ } else if p.LocalTabletId == nil || src == nil {
+ return false
+ }
+ if *p.LocalTabletId != *src {
+ return false
+ }
+ return true
+}
+func (p *TIngestBinlogRequest) Field8DeepEqual(src *types.TUniqueId) bool {
+
+ if !p.LoadId.DeepEqual(src) {
+ return false
+ }
+ return true
+}
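+
+// Minimal encoding sketch for TIngestBinlogRequest (illustrative only; every
+// field is optional and the setters take pointers, so placeholder values are
+// held in local variables; oprot is assumed to be a thrift.TProtocol):
+//
+//	txnID, tabletID := int64(1), int64(10001)
+//	req := NewTIngestBinlogRequest()
+//	req.SetTxnId(&txnID)
+//	req.SetLocalTabletId(&tabletID)
+//	// Unset fields (e.g. load_id) are simply not written by writeField1..8.
+//	if err := req.Write(oprot); err != nil {
+//		// handle encode error
+//	}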
+
+type TIngestBinlogResult_ struct {
+ Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"`
+ IsAsync *bool `thrift:"is_async,2,optional" frugal:"2,optional,bool" json:"is_async,omitempty"`
+}
+
+func NewTIngestBinlogResult_() *TIngestBinlogResult_ {
+ return &TIngestBinlogResult_{}
+}
+
+func (p *TIngestBinlogResult_) InitDefault() {
+}
+
+var TIngestBinlogResult__Status_DEFAULT *status.TStatus
+
+func (p *TIngestBinlogResult_) GetStatus() (v *status.TStatus) {
+ if !p.IsSetStatus() {
+ return TIngestBinlogResult__Status_DEFAULT
+ }
+ return p.Status
+}
+
+var TIngestBinlogResult__IsAsync_DEFAULT bool
+
+func (p *TIngestBinlogResult_) GetIsAsync() (v bool) {
+ if !p.IsSetIsAsync() {
+ return TIngestBinlogResult__IsAsync_DEFAULT
+ }
+ return *p.IsAsync
+}
+func (p *TIngestBinlogResult_) SetStatus(val *status.TStatus) {
+ p.Status = val
+}
+func (p *TIngestBinlogResult_) SetIsAsync(val *bool) {
+ p.IsAsync = val
+}
+
+var fieldIDToName_TIngestBinlogResult_ = map[int16]string{
+ 1: "status",
+ 2: "is_async",
+}
+
+func (p *TIngestBinlogResult_) IsSetStatus() bool {
+ return p.Status != nil
+}
+
+func (p *TIngestBinlogResult_) IsSetIsAsync() bool {
+ return p.IsAsync != nil
+}
+
+func (p *TIngestBinlogResult_) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogResult_[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TIngestBinlogResult_) ReadField1(iprot thrift.TProtocol) error {
+ _field := status.NewTStatus()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Status = _field
+ return nil
+}
+func (p *TIngestBinlogResult_) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.IsAsync = _field
+ return nil
+}
+
+func (p *TIngestBinlogResult_) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TIngestBinlogResult"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TIngestBinlogResult_) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetStatus() {
+ if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Status.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TIngestBinlogResult_) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetIsAsync() {
+ if err = oprot.WriteFieldBegin("is_async", thrift.BOOL, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(*p.IsAsync); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TIngestBinlogResult_) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TIngestBinlogResult_(%+v)", *p)
+
+}
+
+func (p *TIngestBinlogResult_) DeepEqual(ano *TIngestBinlogResult_) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Status) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.IsAsync) {
+ return false
+ }
+ return true
+}
+
+func (p *TIngestBinlogResult_) Field1DeepEqual(src *status.TStatus) bool {
+
+ if !p.Status.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+func (p *TIngestBinlogResult_) Field2DeepEqual(src *bool) bool {
+
+ if p.IsAsync == src {
+ return true
+ } else if p.IsAsync == nil || src == nil {
+ return false
+ }
+ if *p.IsAsync != *src {
+ return false
+ }
+ return true
+}
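+
+// Minimal decoding sketch for TIngestBinlogResult_ (illustrative only; iprot is
+// assumed to be an open thrift.TProtocol):
+//
+//	result := NewTIngestBinlogResult_()
+//	if err := result.Read(iprot); err != nil {
+//		// handle decode error
+//	}
+//	async := result.GetIsAsync() // false when is_async is unset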
+
+type TQueryIngestBinlogRequest struct {
+ TxnId *int64 `thrift:"txn_id,1,optional" frugal:"1,optional,i64" json:"txn_id,omitempty"`
+ PartitionId *int64 `thrift:"partition_id,2,optional" frugal:"2,optional,i64" json:"partition_id,omitempty"`
+ TabletId *int64 `thrift:"tablet_id,3,optional" frugal:"3,optional,i64" json:"tablet_id,omitempty"`
+ LoadId *types.TUniqueId `thrift:"load_id,4,optional" frugal:"4,optional,types.TUniqueId" json:"load_id,omitempty"`
+}
+
+func NewTQueryIngestBinlogRequest() *TQueryIngestBinlogRequest {
+ return &TQueryIngestBinlogRequest{}
+}
+
+func (p *TQueryIngestBinlogRequest) InitDefault() {
+}
+
+var TQueryIngestBinlogRequest_TxnId_DEFAULT int64
+
+func (p *TQueryIngestBinlogRequest) GetTxnId() (v int64) {
+ if !p.IsSetTxnId() {
+ return TQueryIngestBinlogRequest_TxnId_DEFAULT
+ }
+ return *p.TxnId
+}
+
+var TQueryIngestBinlogRequest_PartitionId_DEFAULT int64
+
+func (p *TQueryIngestBinlogRequest) GetPartitionId() (v int64) {
+ if !p.IsSetPartitionId() {
+ return TQueryIngestBinlogRequest_PartitionId_DEFAULT
+ }
+ return *p.PartitionId
+}
+
+var TQueryIngestBinlogRequest_TabletId_DEFAULT int64
+
+func (p *TQueryIngestBinlogRequest) GetTabletId() (v int64) {
+ if !p.IsSetTabletId() {
+ return TQueryIngestBinlogRequest_TabletId_DEFAULT
+ }
+ return *p.TabletId
+}
+
+var TQueryIngestBinlogRequest_LoadId_DEFAULT *types.TUniqueId
+
+func (p *TQueryIngestBinlogRequest) GetLoadId() (v *types.TUniqueId) {
+ if !p.IsSetLoadId() {
+ return TQueryIngestBinlogRequest_LoadId_DEFAULT
+ }
+ return p.LoadId
+}
+func (p *TQueryIngestBinlogRequest) SetTxnId(val *int64) {
+ p.TxnId = val
+}
+func (p *TQueryIngestBinlogRequest) SetPartitionId(val *int64) {
+ p.PartitionId = val
+}
+func (p *TQueryIngestBinlogRequest) SetTabletId(val *int64) {
+ p.TabletId = val
+}
+func (p *TQueryIngestBinlogRequest) SetLoadId(val *types.TUniqueId) {
+ p.LoadId = val
+}
+
+var fieldIDToName_TQueryIngestBinlogRequest = map[int16]string{
+ 1: "txn_id",
+ 2: "partition_id",
+ 3: "tablet_id",
+ 4: "load_id",
+}
+
+func (p *TQueryIngestBinlogRequest) IsSetTxnId() bool {
+ return p.TxnId != nil
+}
+
+func (p *TQueryIngestBinlogRequest) IsSetPartitionId() bool {
+ return p.PartitionId != nil
+}
+
+func (p *TQueryIngestBinlogRequest) IsSetTabletId() bool {
+ return p.TabletId != nil
+}
+
+func (p *TQueryIngestBinlogRequest) IsSetLoadId() bool {
+ return p.LoadId != nil
+}
+
+func (p *TQueryIngestBinlogRequest) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryIngestBinlogRequest[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TQueryIngestBinlogRequest) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.TxnId = _field
+ return nil
+}
+func (p *TQueryIngestBinlogRequest) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.PartitionId = _field
+ return nil
+}
+func (p *TQueryIngestBinlogRequest) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.TabletId = _field
+ return nil
+}
+func (p *TQueryIngestBinlogRequest) ReadField4(iprot thrift.TProtocol) error {
+ _field := types.NewTUniqueId()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.LoadId = _field
+ return nil
+}
+
+func (p *TQueryIngestBinlogRequest) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TQueryIngestBinlogRequest"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TQueryIngestBinlogRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTxnId() {
+ if err = oprot.WriteFieldBegin("txn_id", thrift.I64, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.TxnId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TQueryIngestBinlogRequest) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetPartitionId() {
+ if err = oprot.WriteFieldBegin("partition_id", thrift.I64, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.PartitionId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TQueryIngestBinlogRequest) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTabletId() {
+ if err = oprot.WriteFieldBegin("tablet_id", thrift.I64, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.TabletId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *TQueryIngestBinlogRequest) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetLoadId() {
+ if err = oprot.WriteFieldBegin("load_id", thrift.STRUCT, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.LoadId.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *TQueryIngestBinlogRequest) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TQueryIngestBinlogRequest(%+v)", *p)
+
+}
+
+func (p *TQueryIngestBinlogRequest) DeepEqual(ano *TQueryIngestBinlogRequest) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.TxnId) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.PartitionId) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.TabletId) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.LoadId) {
+ return false
+ }
+ return true
+}
+
+func (p *TQueryIngestBinlogRequest) Field1DeepEqual(src *int64) bool {
+
+ if p.TxnId == src {
+ return true
+ } else if p.TxnId == nil || src == nil {
+ return false
+ }
+ if *p.TxnId != *src {
+ return false
+ }
+ return true
+}
+func (p *TQueryIngestBinlogRequest) Field2DeepEqual(src *int64) bool {
+
+ if p.PartitionId == src {
+ return true
+ } else if p.PartitionId == nil || src == nil {
+ return false
+ }
+ if *p.PartitionId != *src {
+ return false
+ }
+ return true
+}
+func (p *TQueryIngestBinlogRequest) Field3DeepEqual(src *int64) bool {
+
+ if p.TabletId == src {
+ return true
+ } else if p.TabletId == nil || src == nil {
+ return false
+ }
+ if *p.TabletId != *src {
+ return false
+ }
+ return true
+}
+func (p *TQueryIngestBinlogRequest) Field4DeepEqual(src *types.TUniqueId) bool {
+
+ if !p.LoadId.DeepEqual(src) {
+ return false
+ }
+ return true
+}
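+
+// Editorial sketch, not thriftgo output: every field of TQueryIngestBinlogRequest
+// is optional and stored as a pointer, so callers go through the setters and the
+// nil-safe getters; the value below is hypothetical.
+//
+//	partitionID := int64(1001)
+//	req := &TQueryIngestBinlogRequest{}
+//	req.SetPartitionId(&partitionID)
+//	_ = req.GetPartitionId() // 1001; returns TQueryIngestBinlogRequest_PartitionId_DEFAULT when unset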
+
+type TQueryIngestBinlogResult_ struct {
+ Status *TIngestBinlogStatus `thrift:"status,1,optional" frugal:"1,optional,TIngestBinlogStatus" json:"status,omitempty"`
+ ErrMsg *string `thrift:"err_msg,2,optional" frugal:"2,optional,string" json:"err_msg,omitempty"`
+}
+
+func NewTQueryIngestBinlogResult_() *TQueryIngestBinlogResult_ {
+ return &TQueryIngestBinlogResult_{}
+}
+
+func (p *TQueryIngestBinlogResult_) InitDefault() {
+}
+
+var TQueryIngestBinlogResult__Status_DEFAULT TIngestBinlogStatus
+
+func (p *TQueryIngestBinlogResult_) GetStatus() (v TIngestBinlogStatus) {
+ if !p.IsSetStatus() {
+ return TQueryIngestBinlogResult__Status_DEFAULT
+ }
+ return *p.Status
+}
+
+var TQueryIngestBinlogResult__ErrMsg_DEFAULT string
+
+func (p *TQueryIngestBinlogResult_) GetErrMsg() (v string) {
+ if !p.IsSetErrMsg() {
+ return TQueryIngestBinlogResult__ErrMsg_DEFAULT
+ }
+ return *p.ErrMsg
+}
+func (p *TQueryIngestBinlogResult_) SetStatus(val *TIngestBinlogStatus) {
+ p.Status = val
+}
+func (p *TQueryIngestBinlogResult_) SetErrMsg(val *string) {
+ p.ErrMsg = val
+}
+
+var fieldIDToName_TQueryIngestBinlogResult_ = map[int16]string{
+ 1: "status",
+ 2: "err_msg",
+}
+
+func (p *TQueryIngestBinlogResult_) IsSetStatus() bool {
+ return p.Status != nil
+}
+
+func (p *TQueryIngestBinlogResult_) IsSetErrMsg() bool {
+ return p.ErrMsg != nil
+}
+
+func (p *TQueryIngestBinlogResult_) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryIngestBinlogResult_[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TQueryIngestBinlogResult_) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *TIngestBinlogStatus
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ tmp := TIngestBinlogStatus(v)
+ _field = &tmp
+ }
+ p.Status = _field
+ return nil
+}
+func (p *TQueryIngestBinlogResult_) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.ErrMsg = _field
+ return nil
+}
+
+func (p *TQueryIngestBinlogResult_) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TQueryIngestBinlogResult"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TQueryIngestBinlogResult_) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetStatus() {
+ if err = oprot.WriteFieldBegin("status", thrift.I32, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(*p.Status)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TQueryIngestBinlogResult_) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetErrMsg() {
+ if err = oprot.WriteFieldBegin("err_msg", thrift.STRING, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.ErrMsg); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TQueryIngestBinlogResult_) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TQueryIngestBinlogResult_(%+v)", *p)
+
+}
+
+func (p *TQueryIngestBinlogResult_) DeepEqual(ano *TQueryIngestBinlogResult_) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Status) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.ErrMsg) {
+ return false
+ }
+ return true
+}
+
+func (p *TQueryIngestBinlogResult_) Field1DeepEqual(src *TIngestBinlogStatus) bool {
+
+ if p.Status == src {
+ return true
+ } else if p.Status == nil || src == nil {
+ return false
+ }
+ if *p.Status != *src {
+ return false
+ }
+ return true
+}
+func (p *TQueryIngestBinlogResult_) Field2DeepEqual(src *string) bool {
+
+ if p.ErrMsg == src {
+ return true
+ } else if p.ErrMsg == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.ErrMsg, *src) != 0 {
+ return false
+ }
+ return true
+}
+
+type TWorkloadGroupInfo struct {
+ Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"`
+ Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"`
+ Version *int64 `thrift:"version,3,optional" frugal:"3,optional,i64" json:"version,omitempty"`
+ CpuShare *int64 `thrift:"cpu_share,4,optional" frugal:"4,optional,i64" json:"cpu_share,omitempty"`
+ CpuHardLimit *int32 `thrift:"cpu_hard_limit,5,optional" frugal:"5,optional,i32" json:"cpu_hard_limit,omitempty"`
+ MemLimit *string `thrift:"mem_limit,6,optional" frugal:"6,optional,string" json:"mem_limit,omitempty"`
+ EnableMemoryOvercommit *bool `thrift:"enable_memory_overcommit,7,optional" frugal:"7,optional,bool" json:"enable_memory_overcommit,omitempty"`
+ EnableCpuHardLimit *bool `thrift:"enable_cpu_hard_limit,8,optional" frugal:"8,optional,bool" json:"enable_cpu_hard_limit,omitempty"`
+ ScanThreadNum *int32 `thrift:"scan_thread_num,9,optional" frugal:"9,optional,i32" json:"scan_thread_num,omitempty"`
+ MaxRemoteScanThreadNum *int32 `thrift:"max_remote_scan_thread_num,10,optional" frugal:"10,optional,i32" json:"max_remote_scan_thread_num,omitempty"`
+ MinRemoteScanThreadNum *int32 `thrift:"min_remote_scan_thread_num,11,optional" frugal:"11,optional,i32" json:"min_remote_scan_thread_num,omitempty"`
+ MemoryLowWatermark *int32 `thrift:"memory_low_watermark,12,optional" frugal:"12,optional,i32" json:"memory_low_watermark,omitempty"`
+ MemoryHighWatermark *int32 `thrift:"memory_high_watermark,13,optional" frugal:"13,optional,i32" json:"memory_high_watermark,omitempty"`
+ ReadBytesPerSecond *int64 `thrift:"read_bytes_per_second,14,optional" frugal:"14,optional,i64" json:"read_bytes_per_second,omitempty"`
+ RemoteReadBytesPerSecond *int64 `thrift:"remote_read_bytes_per_second,15,optional" frugal:"15,optional,i64" json:"remote_read_bytes_per_second,omitempty"`
+ Tag *string `thrift:"tag,16,optional" frugal:"16,optional,string" json:"tag,omitempty"`
+}
+
+func NewTWorkloadGroupInfo() *TWorkloadGroupInfo {
+ return &TWorkloadGroupInfo{}
+}
+
+func (p *TWorkloadGroupInfo) InitDefault() {
+}
+
+var TWorkloadGroupInfo_Id_DEFAULT int64
+
+func (p *TWorkloadGroupInfo) GetId() (v int64) {
+ if !p.IsSetId() {
+ return TWorkloadGroupInfo_Id_DEFAULT
+ }
+ return *p.Id
+}
+
+var TWorkloadGroupInfo_Name_DEFAULT string
+
+func (p *TWorkloadGroupInfo) GetName() (v string) {
+ if !p.IsSetName() {
+ return TWorkloadGroupInfo_Name_DEFAULT
+ }
+ return *p.Name
+}
+
+var TWorkloadGroupInfo_Version_DEFAULT int64
+
+func (p *TWorkloadGroupInfo) GetVersion() (v int64) {
+ if !p.IsSetVersion() {
+ return TWorkloadGroupInfo_Version_DEFAULT
+ }
+ return *p.Version
+}
+
+var TWorkloadGroupInfo_CpuShare_DEFAULT int64
+
+func (p *TWorkloadGroupInfo) GetCpuShare() (v int64) {
+ if !p.IsSetCpuShare() {
+ return TWorkloadGroupInfo_CpuShare_DEFAULT
+ }
+ return *p.CpuShare
+}
+
+var TWorkloadGroupInfo_CpuHardLimit_DEFAULT int32
+
+func (p *TWorkloadGroupInfo) GetCpuHardLimit() (v int32) {
+ if !p.IsSetCpuHardLimit() {
+ return TWorkloadGroupInfo_CpuHardLimit_DEFAULT
+ }
+ return *p.CpuHardLimit
+}
+
+var TWorkloadGroupInfo_MemLimit_DEFAULT string
+
+func (p *TWorkloadGroupInfo) GetMemLimit() (v string) {
+ if !p.IsSetMemLimit() {
+ return TWorkloadGroupInfo_MemLimit_DEFAULT
+ }
+ return *p.MemLimit
+}
+
+var TWorkloadGroupInfo_EnableMemoryOvercommit_DEFAULT bool
+
+func (p *TWorkloadGroupInfo) GetEnableMemoryOvercommit() (v bool) {
+ if !p.IsSetEnableMemoryOvercommit() {
+ return TWorkloadGroupInfo_EnableMemoryOvercommit_DEFAULT
+ }
+ return *p.EnableMemoryOvercommit
+}
+
+var TWorkloadGroupInfo_EnableCpuHardLimit_DEFAULT bool
+
+func (p *TWorkloadGroupInfo) GetEnableCpuHardLimit() (v bool) {
+ if !p.IsSetEnableCpuHardLimit() {
+ return TWorkloadGroupInfo_EnableCpuHardLimit_DEFAULT
+ }
+ return *p.EnableCpuHardLimit
+}
+
+var TWorkloadGroupInfo_ScanThreadNum_DEFAULT int32
+
+func (p *TWorkloadGroupInfo) GetScanThreadNum() (v int32) {
+ if !p.IsSetScanThreadNum() {
+ return TWorkloadGroupInfo_ScanThreadNum_DEFAULT
+ }
+ return *p.ScanThreadNum
+}
+
+var TWorkloadGroupInfo_MaxRemoteScanThreadNum_DEFAULT int32
+
+func (p *TWorkloadGroupInfo) GetMaxRemoteScanThreadNum() (v int32) {
+ if !p.IsSetMaxRemoteScanThreadNum() {
+ return TWorkloadGroupInfo_MaxRemoteScanThreadNum_DEFAULT
+ }
+ return *p.MaxRemoteScanThreadNum
+}
+
+var TWorkloadGroupInfo_MinRemoteScanThreadNum_DEFAULT int32
+
+func (p *TWorkloadGroupInfo) GetMinRemoteScanThreadNum() (v int32) {
+ if !p.IsSetMinRemoteScanThreadNum() {
+ return TWorkloadGroupInfo_MinRemoteScanThreadNum_DEFAULT
+ }
+ return *p.MinRemoteScanThreadNum
+}
+
+var TWorkloadGroupInfo_MemoryLowWatermark_DEFAULT int32
+
+func (p *TWorkloadGroupInfo) GetMemoryLowWatermark() (v int32) {
+ if !p.IsSetMemoryLowWatermark() {
+ return TWorkloadGroupInfo_MemoryLowWatermark_DEFAULT
+ }
+ return *p.MemoryLowWatermark
+}
+
+var TWorkloadGroupInfo_MemoryHighWatermark_DEFAULT int32
+
+func (p *TWorkloadGroupInfo) GetMemoryHighWatermark() (v int32) {
+ if !p.IsSetMemoryHighWatermark() {
+ return TWorkloadGroupInfo_MemoryHighWatermark_DEFAULT
+ }
+ return *p.MemoryHighWatermark
+}
+
+var TWorkloadGroupInfo_ReadBytesPerSecond_DEFAULT int64
+
+func (p *TWorkloadGroupInfo) GetReadBytesPerSecond() (v int64) {
+ if !p.IsSetReadBytesPerSecond() {
+ return TWorkloadGroupInfo_ReadBytesPerSecond_DEFAULT
+ }
+ return *p.ReadBytesPerSecond
+}
+
+var TWorkloadGroupInfo_RemoteReadBytesPerSecond_DEFAULT int64
+
+func (p *TWorkloadGroupInfo) GetRemoteReadBytesPerSecond() (v int64) {
+ if !p.IsSetRemoteReadBytesPerSecond() {
+ return TWorkloadGroupInfo_RemoteReadBytesPerSecond_DEFAULT
+ }
+ return *p.RemoteReadBytesPerSecond
+}
+
+var TWorkloadGroupInfo_Tag_DEFAULT string
+
+func (p *TWorkloadGroupInfo) GetTag() (v string) {
+ if !p.IsSetTag() {
+ return TWorkloadGroupInfo_Tag_DEFAULT
+ }
+ return *p.Tag
+}
+func (p *TWorkloadGroupInfo) SetId(val *int64) {
+ p.Id = val
+}
+func (p *TWorkloadGroupInfo) SetName(val *string) {
+ p.Name = val
+}
+func (p *TWorkloadGroupInfo) SetVersion(val *int64) {
+ p.Version = val
+}
+func (p *TWorkloadGroupInfo) SetCpuShare(val *int64) {
+ p.CpuShare = val
+}
+func (p *TWorkloadGroupInfo) SetCpuHardLimit(val *int32) {
+ p.CpuHardLimit = val
+}
+func (p *TWorkloadGroupInfo) SetMemLimit(val *string) {
+ p.MemLimit = val
+}
+func (p *TWorkloadGroupInfo) SetEnableMemoryOvercommit(val *bool) {
+ p.EnableMemoryOvercommit = val
+}
+func (p *TWorkloadGroupInfo) SetEnableCpuHardLimit(val *bool) {
+ p.EnableCpuHardLimit = val
+}
+func (p *TWorkloadGroupInfo) SetScanThreadNum(val *int32) {
+ p.ScanThreadNum = val
+}
+func (p *TWorkloadGroupInfo) SetMaxRemoteScanThreadNum(val *int32) {
+ p.MaxRemoteScanThreadNum = val
+}
+func (p *TWorkloadGroupInfo) SetMinRemoteScanThreadNum(val *int32) {
+ p.MinRemoteScanThreadNum = val
+}
+func (p *TWorkloadGroupInfo) SetMemoryLowWatermark(val *int32) {
+ p.MemoryLowWatermark = val
+}
+func (p *TWorkloadGroupInfo) SetMemoryHighWatermark(val *int32) {
+ p.MemoryHighWatermark = val
+}
+func (p *TWorkloadGroupInfo) SetReadBytesPerSecond(val *int64) {
+ p.ReadBytesPerSecond = val
+}
+func (p *TWorkloadGroupInfo) SetRemoteReadBytesPerSecond(val *int64) {
+ p.RemoteReadBytesPerSecond = val
+}
+func (p *TWorkloadGroupInfo) SetTag(val *string) {
+ p.Tag = val
+}
+
+var fieldIDToName_TWorkloadGroupInfo = map[int16]string{
+ 1: "id",
+ 2: "name",
+ 3: "version",
+ 4: "cpu_share",
+ 5: "cpu_hard_limit",
+ 6: "mem_limit",
+ 7: "enable_memory_overcommit",
+ 8: "enable_cpu_hard_limit",
+ 9: "scan_thread_num",
+ 10: "max_remote_scan_thread_num",
+ 11: "min_remote_scan_thread_num",
+ 12: "memory_low_watermark",
+ 13: "memory_high_watermark",
+ 14: "read_bytes_per_second",
+ 15: "remote_read_bytes_per_second",
+ 16: "tag",
+}
+
+func (p *TWorkloadGroupInfo) IsSetId() bool {
+ return p.Id != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetName() bool {
+ return p.Name != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetVersion() bool {
+ return p.Version != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetCpuShare() bool {
+ return p.CpuShare != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetCpuHardLimit() bool {
+ return p.CpuHardLimit != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetMemLimit() bool {
+ return p.MemLimit != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetEnableMemoryOvercommit() bool {
+ return p.EnableMemoryOvercommit != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetEnableCpuHardLimit() bool {
+ return p.EnableCpuHardLimit != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetScanThreadNum() bool {
+ return p.ScanThreadNum != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetMaxRemoteScanThreadNum() bool {
+ return p.MaxRemoteScanThreadNum != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetMinRemoteScanThreadNum() bool {
+ return p.MinRemoteScanThreadNum != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetMemoryLowWatermark() bool {
+ return p.MemoryLowWatermark != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetMemoryHighWatermark() bool {
+ return p.MemoryHighWatermark != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetReadBytesPerSecond() bool {
+ return p.ReadBytesPerSecond != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetRemoteReadBytesPerSecond() bool {
+ return p.RemoteReadBytesPerSecond != nil
+}
+
+func (p *TWorkloadGroupInfo) IsSetTag() bool {
+ return p.Tag != nil
+}
+
+func (p *TWorkloadGroupInfo) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 5:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField5(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 6:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField6(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 7:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField7(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 8:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField8(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 9:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField9(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 10:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField10(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 11:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField11(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 12:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField12(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 13:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField13(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 14:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField14(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 15:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField15(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 16:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField16(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadGroupInfo[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Id = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Name = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Version = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.CpuShare = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.CpuHardLimit = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.MemLimit = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field *bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.EnableMemoryOvercommit = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField8(iprot thrift.TProtocol) error {
+
+ var _field *bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.EnableCpuHardLimit = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField9(iprot thrift.TProtocol) error {
+
+ var _field *int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.ScanThreadNum = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField10(iprot thrift.TProtocol) error {
+
+ var _field *int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.MaxRemoteScanThreadNum = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField11(iprot thrift.TProtocol) error {
+
+ var _field *int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.MinRemoteScanThreadNum = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField12(iprot thrift.TProtocol) error {
+
+ var _field *int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.MemoryLowWatermark = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField13(iprot thrift.TProtocol) error {
+
+ var _field *int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.MemoryHighWatermark = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField14(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.ReadBytesPerSecond = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField15(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.RemoteReadBytesPerSecond = _field
+ return nil
+}
+func (p *TWorkloadGroupInfo) ReadField16(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Tag = _field
+ return nil
+}
+
+func (p *TWorkloadGroupInfo) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TWorkloadGroupInfo"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ if err = p.writeField5(oprot); err != nil {
+ fieldId = 5
+ goto WriteFieldError
+ }
+ if err = p.writeField6(oprot); err != nil {
+ fieldId = 6
+ goto WriteFieldError
+ }
+ if err = p.writeField7(oprot); err != nil {
+ fieldId = 7
+ goto WriteFieldError
+ }
+ if err = p.writeField8(oprot); err != nil {
+ fieldId = 8
+ goto WriteFieldError
+ }
+ if err = p.writeField9(oprot); err != nil {
+ fieldId = 9
+ goto WriteFieldError
+ }
+ if err = p.writeField10(oprot); err != nil {
+ fieldId = 10
+ goto WriteFieldError
+ }
+ if err = p.writeField11(oprot); err != nil {
+ fieldId = 11
+ goto WriteFieldError
+ }
+ if err = p.writeField12(oprot); err != nil {
+ fieldId = 12
+ goto WriteFieldError
+ }
+ if err = p.writeField13(oprot); err != nil {
+ fieldId = 13
+ goto WriteFieldError
+ }
+ if err = p.writeField14(oprot); err != nil {
+ fieldId = 14
+ goto WriteFieldError
+ }
+ if err = p.writeField15(oprot); err != nil {
+ fieldId = 15
+ goto WriteFieldError
+ }
+ if err = p.writeField16(oprot); err != nil {
+ fieldId = 16
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetId() {
+ if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.Id); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetName() {
+ if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.Name); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVersion() {
+ if err = oprot.WriteFieldBegin("version", thrift.I64, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.Version); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetCpuShare() {
+ if err = oprot.WriteFieldBegin("cpu_share", thrift.I64, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.CpuShare); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetCpuHardLimit() {
+ if err = oprot.WriteFieldBegin("cpu_hard_limit", thrift.I32, 5); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.CpuHardLimit); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField6(oprot thrift.TProtocol) (err error) {
+ if p.IsSetMemLimit() {
+ if err = oprot.WriteFieldBegin("mem_limit", thrift.STRING, 6); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.MemLimit); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField7(oprot thrift.TProtocol) (err error) {
+ if p.IsSetEnableMemoryOvercommit() {
+ if err = oprot.WriteFieldBegin("enable_memory_overcommit", thrift.BOOL, 7); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(*p.EnableMemoryOvercommit); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField8(oprot thrift.TProtocol) (err error) {
+ if p.IsSetEnableCpuHardLimit() {
+ if err = oprot.WriteFieldBegin("enable_cpu_hard_limit", thrift.BOOL, 8); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(*p.EnableCpuHardLimit); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField9(oprot thrift.TProtocol) (err error) {
+ if p.IsSetScanThreadNum() {
+ if err = oprot.WriteFieldBegin("scan_thread_num", thrift.I32, 9); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.ScanThreadNum); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField10(oprot thrift.TProtocol) (err error) {
+ if p.IsSetMaxRemoteScanThreadNum() {
+ if err = oprot.WriteFieldBegin("max_remote_scan_thread_num", thrift.I32, 10); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.MaxRemoteScanThreadNum); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField11(oprot thrift.TProtocol) (err error) {
+ if p.IsSetMinRemoteScanThreadNum() {
+ if err = oprot.WriteFieldBegin("min_remote_scan_thread_num", thrift.I32, 11); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.MinRemoteScanThreadNum); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField12(oprot thrift.TProtocol) (err error) {
+ if p.IsSetMemoryLowWatermark() {
+ if err = oprot.WriteFieldBegin("memory_low_watermark", thrift.I32, 12); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.MemoryLowWatermark); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField13(oprot thrift.TProtocol) (err error) {
+ if p.IsSetMemoryHighWatermark() {
+ if err = oprot.WriteFieldBegin("memory_high_watermark", thrift.I32, 13); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.MemoryHighWatermark); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField14(oprot thrift.TProtocol) (err error) {
+ if p.IsSetReadBytesPerSecond() {
+ if err = oprot.WriteFieldBegin("read_bytes_per_second", thrift.I64, 14); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.ReadBytesPerSecond); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 14 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 14 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField15(oprot thrift.TProtocol) (err error) {
+ if p.IsSetRemoteReadBytesPerSecond() {
+ if err = oprot.WriteFieldBegin("remote_read_bytes_per_second", thrift.I64, 15); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.RemoteReadBytesPerSecond); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 15 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 15 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) writeField16(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTag() {
+ if err = oprot.WriteFieldBegin("tag", thrift.STRING, 16); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.Tag); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 16 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 16 end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TWorkloadGroupInfo(%+v)", *p)
+
+}
+
+func (p *TWorkloadGroupInfo) DeepEqual(ano *TWorkloadGroupInfo) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Id) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.Name) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.Version) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.CpuShare) {
+ return false
+ }
+ if !p.Field5DeepEqual(ano.CpuHardLimit) {
+ return false
+ }
+ if !p.Field6DeepEqual(ano.MemLimit) {
+ return false
+ }
+ if !p.Field7DeepEqual(ano.EnableMemoryOvercommit) {
+ return false
+ }
+ if !p.Field8DeepEqual(ano.EnableCpuHardLimit) {
+ return false
+ }
+ if !p.Field9DeepEqual(ano.ScanThreadNum) {
+ return false
+ }
+ if !p.Field10DeepEqual(ano.MaxRemoteScanThreadNum) {
+ return false
+ }
+ if !p.Field11DeepEqual(ano.MinRemoteScanThreadNum) {
+ return false
+ }
+ if !p.Field12DeepEqual(ano.MemoryLowWatermark) {
+ return false
+ }
+ if !p.Field13DeepEqual(ano.MemoryHighWatermark) {
+ return false
+ }
+ if !p.Field14DeepEqual(ano.ReadBytesPerSecond) {
+ return false
+ }
+ if !p.Field15DeepEqual(ano.RemoteReadBytesPerSecond) {
+ return false
+ }
+ if !p.Field16DeepEqual(ano.Tag) {
+ return false
+ }
+ return true
+}
+
+func (p *TWorkloadGroupInfo) Field1DeepEqual(src *int64) bool {
+
+ if p.Id == src {
+ return true
+ } else if p.Id == nil || src == nil {
+ return false
+ }
+ if *p.Id != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field2DeepEqual(src *string) bool {
+
+ if p.Name == src {
+ return true
+ } else if p.Name == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.Name, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field3DeepEqual(src *int64) bool {
+
+ if p.Version == src {
+ return true
+ } else if p.Version == nil || src == nil {
+ return false
+ }
+ if *p.Version != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field4DeepEqual(src *int64) bool {
+
+ if p.CpuShare == src {
+ return true
+ } else if p.CpuShare == nil || src == nil {
+ return false
+ }
+ if *p.CpuShare != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field5DeepEqual(src *int32) bool {
+
+ if p.CpuHardLimit == src {
+ return true
+ } else if p.CpuHardLimit == nil || src == nil {
+ return false
+ }
+ if *p.CpuHardLimit != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field6DeepEqual(src *string) bool {
+
+ if p.MemLimit == src {
+ return true
+ } else if p.MemLimit == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.MemLimit, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field7DeepEqual(src *bool) bool {
+
+ if p.EnableMemoryOvercommit == src {
+ return true
+ } else if p.EnableMemoryOvercommit == nil || src == nil {
+ return false
+ }
+ if *p.EnableMemoryOvercommit != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field8DeepEqual(src *bool) bool {
+
+ if p.EnableCpuHardLimit == src {
+ return true
+ } else if p.EnableCpuHardLimit == nil || src == nil {
+ return false
+ }
+ if *p.EnableCpuHardLimit != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field9DeepEqual(src *int32) bool {
+
+ if p.ScanThreadNum == src {
+ return true
+ } else if p.ScanThreadNum == nil || src == nil {
+ return false
+ }
+ if *p.ScanThreadNum != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field10DeepEqual(src *int32) bool {
+
+ if p.MaxRemoteScanThreadNum == src {
+ return true
+ } else if p.MaxRemoteScanThreadNum == nil || src == nil {
+ return false
+ }
+ if *p.MaxRemoteScanThreadNum != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field11DeepEqual(src *int32) bool {
+
+ if p.MinRemoteScanThreadNum == src {
+ return true
+ } else if p.MinRemoteScanThreadNum == nil || src == nil {
+ return false
+ }
+ if *p.MinRemoteScanThreadNum != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field12DeepEqual(src *int32) bool {
+
+ if p.MemoryLowWatermark == src {
+ return true
+ } else if p.MemoryLowWatermark == nil || src == nil {
+ return false
+ }
+ if *p.MemoryLowWatermark != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field13DeepEqual(src *int32) bool {
+
+ if p.MemoryHighWatermark == src {
+ return true
+ } else if p.MemoryHighWatermark == nil || src == nil {
+ return false
+ }
+ if *p.MemoryHighWatermark != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field14DeepEqual(src *int64) bool {
+
+ if p.ReadBytesPerSecond == src {
+ return true
+ } else if p.ReadBytesPerSecond == nil || src == nil {
+ return false
+ }
+ if *p.ReadBytesPerSecond != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field15DeepEqual(src *int64) bool {
+
+ if p.RemoteReadBytesPerSecond == src {
+ return true
+ } else if p.RemoteReadBytesPerSecond == nil || src == nil {
+ return false
+ }
+ if *p.RemoteReadBytesPerSecond != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadGroupInfo) Field16DeepEqual(src *string) bool {
+
+ if p.Tag == src {
+ return true
+ } else if p.Tag == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.Tag, *src) != 0 {
+ return false
+ }
+ return true
+}
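+
+// Editorial sketch, not thriftgo output: TWorkloadGroupInfo follows the same
+// optional-pointer convention, so presence is checked with IsSetX before the
+// value is dereferenced; the values below are hypothetical.
+//
+//	memLimit := "30%"
+//	info := NewTWorkloadGroupInfo()
+//	info.SetMemLimit(&memLimit)
+//	if info.IsSetMemLimit() {
+//		_ = info.GetMemLimit() // "30%"
+//	}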
+
+type TWorkloadCondition struct {
+ MetricName *TWorkloadMetricType `thrift:"metric_name,1,optional" frugal:"1,optional,TWorkloadMetricType" json:"metric_name,omitempty"`
+ Op *TCompareOperator `thrift:"op,2,optional" frugal:"2,optional,TCompareOperator" json:"op,omitempty"`
+ Value *string `thrift:"value,3,optional" frugal:"3,optional,string" json:"value,omitempty"`
+}
+
+func NewTWorkloadCondition() *TWorkloadCondition {
+ return &TWorkloadCondition{}
+}
+
+func (p *TWorkloadCondition) InitDefault() {
+}
+
+var TWorkloadCondition_MetricName_DEFAULT TWorkloadMetricType
+
+func (p *TWorkloadCondition) GetMetricName() (v TWorkloadMetricType) {
+ if !p.IsSetMetricName() {
+ return TWorkloadCondition_MetricName_DEFAULT
+ }
+ return *p.MetricName
+}
+
+var TWorkloadCondition_Op_DEFAULT TCompareOperator
+
+func (p *TWorkloadCondition) GetOp() (v TCompareOperator) {
+ if !p.IsSetOp() {
+ return TWorkloadCondition_Op_DEFAULT
+ }
+ return *p.Op
+}
+
+var TWorkloadCondition_Value_DEFAULT string
+
+func (p *TWorkloadCondition) GetValue() (v string) {
+ if !p.IsSetValue() {
+ return TWorkloadCondition_Value_DEFAULT
+ }
+ return *p.Value
+}
+func (p *TWorkloadCondition) SetMetricName(val *TWorkloadMetricType) {
+ p.MetricName = val
+}
+func (p *TWorkloadCondition) SetOp(val *TCompareOperator) {
+ p.Op = val
+}
+func (p *TWorkloadCondition) SetValue(val *string) {
+ p.Value = val
+}
+
+var fieldIDToName_TWorkloadCondition = map[int16]string{
+ 1: "metric_name",
+ 2: "op",
+ 3: "value",
+}
+
+func (p *TWorkloadCondition) IsSetMetricName() bool {
+ return p.MetricName != nil
+}
+
+func (p *TWorkloadCondition) IsSetOp() bool {
+ return p.Op != nil
+}
+
+func (p *TWorkloadCondition) IsSetValue() bool {
+ return p.Value != nil
+}
+
+func (p *TWorkloadCondition) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadCondition[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TWorkloadCondition) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *TWorkloadMetricType
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ tmp := TWorkloadMetricType(v)
+ _field = &tmp
+ }
+ p.MetricName = _field
+ return nil
+}
+func (p *TWorkloadCondition) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *TCompareOperator
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ tmp := TCompareOperator(v)
+ _field = &tmp
+ }
+ p.Op = _field
+ return nil
+}
+func (p *TWorkloadCondition) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Value = _field
+ return nil
+}
+
+func (p *TWorkloadCondition) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TWorkloadCondition"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TWorkloadCondition) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetMetricName() {
+ if err = oprot.WriteFieldBegin("metric_name", thrift.I32, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(*p.MetricName)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TWorkloadCondition) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetOp() {
+ if err = oprot.WriteFieldBegin("op", thrift.I32, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(*p.Op)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TWorkloadCondition) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetValue() {
+ if err = oprot.WriteFieldBegin("value", thrift.STRING, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.Value); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *TWorkloadCondition) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TWorkloadCondition(%+v)", *p)
+
+}
+
+func (p *TWorkloadCondition) DeepEqual(ano *TWorkloadCondition) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.MetricName) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.Op) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.Value) {
+ return false
+ }
+ return true
+}
+
+func (p *TWorkloadCondition) Field1DeepEqual(src *TWorkloadMetricType) bool {
+
+ if p.MetricName == src {
+ return true
+ } else if p.MetricName == nil || src == nil {
+ return false
+ }
+ if *p.MetricName != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadCondition) Field2DeepEqual(src *TCompareOperator) bool {
+
+ if p.Op == src {
+ return true
+ } else if p.Op == nil || src == nil {
+ return false
+ }
+ if *p.Op != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadCondition) Field3DeepEqual(src *string) bool {
+
+ if p.Value == src {
+ return true
+ } else if p.Value == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.Value, *src) != 0 {
+ return false
+ }
+ return true
+}
+
+type TWorkloadAction struct {
+ Action *TWorkloadActionType `thrift:"action,1,optional" frugal:"1,optional,TWorkloadActionType" json:"action,omitempty"`
+ ActionArgs_ *string `thrift:"action_args,2,optional" frugal:"2,optional,string" json:"action_args,omitempty"`
+}
+
+func NewTWorkloadAction() *TWorkloadAction {
+ return &TWorkloadAction{}
+}
+
+func (p *TWorkloadAction) InitDefault() {
+}
+
+var TWorkloadAction_Action_DEFAULT TWorkloadActionType
+
+func (p *TWorkloadAction) GetAction() (v TWorkloadActionType) {
+ if !p.IsSetAction() {
+ return TWorkloadAction_Action_DEFAULT
+ }
+ return *p.Action
+}
+
+var TWorkloadAction_ActionArgs__DEFAULT string
+
+func (p *TWorkloadAction) GetActionArgs_() (v string) {
+ if !p.IsSetActionArgs_() {
+ return TWorkloadAction_ActionArgs__DEFAULT
+ }
+ return *p.ActionArgs_
+}
+func (p *TWorkloadAction) SetAction(val *TWorkloadActionType) {
+ p.Action = val
+}
+func (p *TWorkloadAction) SetActionArgs_(val *string) {
+ p.ActionArgs_ = val
+}
+
+var fieldIDToName_TWorkloadAction = map[int16]string{
+ 1: "action",
+ 2: "action_args",
+}
+
+func (p *TWorkloadAction) IsSetAction() bool {
+ return p.Action != nil
+}
+
+func (p *TWorkloadAction) IsSetActionArgs_() bool {
+ return p.ActionArgs_ != nil
+}
+
+func (p *TWorkloadAction) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadAction[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TWorkloadAction) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *TWorkloadActionType
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ tmp := TWorkloadActionType(v)
+ _field = &tmp
+ }
+ p.Action = _field
+ return nil
+}
+func (p *TWorkloadAction) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.ActionArgs_ = _field
+ return nil
+}
+
+func (p *TWorkloadAction) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TWorkloadAction"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TWorkloadAction) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetAction() {
+ if err = oprot.WriteFieldBegin("action", thrift.I32, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(*p.Action)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TWorkloadAction) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetActionArgs_() {
+ if err = oprot.WriteFieldBegin("action_args", thrift.STRING, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.ActionArgs_); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TWorkloadAction) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TWorkloadAction(%+v)", *p)
+
+}
+
+func (p *TWorkloadAction) DeepEqual(ano *TWorkloadAction) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Action) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.ActionArgs_) {
+ return false
+ }
+ return true
+}
+
+func (p *TWorkloadAction) Field1DeepEqual(src *TWorkloadActionType) bool {
+
+ if p.Action == src {
+ return true
+ } else if p.Action == nil || src == nil {
+ return false
+ }
+ if *p.Action != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadAction) Field2DeepEqual(src *string) bool {
+
+ if p.ActionArgs_ == src {
+ return true
+ } else if p.ActionArgs_ == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.ActionArgs_, *src) != 0 {
+ return false
+ }
+ return true
+}
+
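+// TWorkloadSchedPolicy bundles a policy's identity (id/name/version), its
+// priority and enabled flag, the condition and action lists, and the workload
+// group ids it applies to. All fields are optional thrift fields.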
+type TWorkloadSchedPolicy struct {
+ Id *int64 `thrift:"id,1,optional" frugal:"1,optional,i64" json:"id,omitempty"`
+ Name *string `thrift:"name,2,optional" frugal:"2,optional,string" json:"name,omitempty"`
+ Version *int32 `thrift:"version,3,optional" frugal:"3,optional,i32" json:"version,omitempty"`
+ Priority *int32 `thrift:"priority,4,optional" frugal:"4,optional,i32" json:"priority,omitempty"`
+ Enabled *bool `thrift:"enabled,5,optional" frugal:"5,optional,bool" json:"enabled,omitempty"`
+ ConditionList []*TWorkloadCondition `thrift:"condition_list,6,optional" frugal:"6,optional,list" json:"condition_list,omitempty"`
+ ActionList []*TWorkloadAction `thrift:"action_list,7,optional" frugal:"7,optional,list" json:"action_list,omitempty"`
+ WgIdList []int64 `thrift:"wg_id_list,8,optional" frugal:"8,optional,list" json:"wg_id_list,omitempty"`
+}
+
+func NewTWorkloadSchedPolicy() *TWorkloadSchedPolicy {
+ return &TWorkloadSchedPolicy{}
+}
+
+func (p *TWorkloadSchedPolicy) InitDefault() {
+}
+
+var TWorkloadSchedPolicy_Id_DEFAULT int64
+
+func (p *TWorkloadSchedPolicy) GetId() (v int64) {
+ if !p.IsSetId() {
+ return TWorkloadSchedPolicy_Id_DEFAULT
+ }
+ return *p.Id
+}
+
+var TWorkloadSchedPolicy_Name_DEFAULT string
+
+func (p *TWorkloadSchedPolicy) GetName() (v string) {
+ if !p.IsSetName() {
+ return TWorkloadSchedPolicy_Name_DEFAULT
+ }
+ return *p.Name
+}
+
+var TWorkloadSchedPolicy_Version_DEFAULT int32
+
+func (p *TWorkloadSchedPolicy) GetVersion() (v int32) {
+ if !p.IsSetVersion() {
+ return TWorkloadSchedPolicy_Version_DEFAULT
+ }
+ return *p.Version
+}
+
+var TWorkloadSchedPolicy_Priority_DEFAULT int32
+
+func (p *TWorkloadSchedPolicy) GetPriority() (v int32) {
+ if !p.IsSetPriority() {
+ return TWorkloadSchedPolicy_Priority_DEFAULT
+ }
+ return *p.Priority
+}
+
+var TWorkloadSchedPolicy_Enabled_DEFAULT bool
+
+func (p *TWorkloadSchedPolicy) GetEnabled() (v bool) {
+ if !p.IsSetEnabled() {
+ return TWorkloadSchedPolicy_Enabled_DEFAULT
+ }
+ return *p.Enabled
+}
+
+var TWorkloadSchedPolicy_ConditionList_DEFAULT []*TWorkloadCondition
+
+func (p *TWorkloadSchedPolicy) GetConditionList() (v []*TWorkloadCondition) {
+ if !p.IsSetConditionList() {
+ return TWorkloadSchedPolicy_ConditionList_DEFAULT
+ }
+ return p.ConditionList
+}
+
+var TWorkloadSchedPolicy_ActionList_DEFAULT []*TWorkloadAction
+
+func (p *TWorkloadSchedPolicy) GetActionList() (v []*TWorkloadAction) {
+ if !p.IsSetActionList() {
+ return TWorkloadSchedPolicy_ActionList_DEFAULT
+ }
+ return p.ActionList
+}
+
+var TWorkloadSchedPolicy_WgIdList_DEFAULT []int64
+
+func (p *TWorkloadSchedPolicy) GetWgIdList() (v []int64) {
+ if !p.IsSetWgIdList() {
+ return TWorkloadSchedPolicy_WgIdList_DEFAULT
+ }
+ return p.WgIdList
+}
+func (p *TWorkloadSchedPolicy) SetId(val *int64) {
+ p.Id = val
+}
+func (p *TWorkloadSchedPolicy) SetName(val *string) {
+ p.Name = val
+}
+func (p *TWorkloadSchedPolicy) SetVersion(val *int32) {
+ p.Version = val
+}
+func (p *TWorkloadSchedPolicy) SetPriority(val *int32) {
+ p.Priority = val
+}
+func (p *TWorkloadSchedPolicy) SetEnabled(val *bool) {
+ p.Enabled = val
+}
+func (p *TWorkloadSchedPolicy) SetConditionList(val []*TWorkloadCondition) {
+ p.ConditionList = val
+}
+func (p *TWorkloadSchedPolicy) SetActionList(val []*TWorkloadAction) {
+ p.ActionList = val
+}
+func (p *TWorkloadSchedPolicy) SetWgIdList(val []int64) {
+ p.WgIdList = val
+}
+
+var fieldIDToName_TWorkloadSchedPolicy = map[int16]string{
+ 1: "id",
+ 2: "name",
+ 3: "version",
+ 4: "priority",
+ 5: "enabled",
+ 6: "condition_list",
+ 7: "action_list",
+ 8: "wg_id_list",
+}
+
+func (p *TWorkloadSchedPolicy) IsSetId() bool {
+ return p.Id != nil
+}
+
+func (p *TWorkloadSchedPolicy) IsSetName() bool {
+ return p.Name != nil
+}
+
+func (p *TWorkloadSchedPolicy) IsSetVersion() bool {
+ return p.Version != nil
+}
+
+func (p *TWorkloadSchedPolicy) IsSetPriority() bool {
+ return p.Priority != nil
+}
+
+func (p *TWorkloadSchedPolicy) IsSetEnabled() bool {
+ return p.Enabled != nil
+}
+
+func (p *TWorkloadSchedPolicy) IsSetConditionList() bool {
+ return p.ConditionList != nil
+}
+
+func (p *TWorkloadSchedPolicy) IsSetActionList() bool {
+ return p.ActionList != nil
+}
+
+func (p *TWorkloadSchedPolicy) IsSetWgIdList() bool {
+ return p.WgIdList != nil
+}
+
+func (p *TWorkloadSchedPolicy) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 5:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField5(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 6:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField6(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 7:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField7(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 8:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField8(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadSchedPolicy[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TWorkloadSchedPolicy) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Id = _field
+ return nil
+}
+func (p *TWorkloadSchedPolicy) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Name = _field
+ return nil
+}
+func (p *TWorkloadSchedPolicy) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Version = _field
+ return nil
+}
+func (p *TWorkloadSchedPolicy) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Priority = _field
+ return nil
+}
+func (p *TWorkloadSchedPolicy) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Enabled = _field
+ return nil
+}
+func (p *TWorkloadSchedPolicy) ReadField6(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]*TWorkloadCondition, 0, size)
+ values := make([]TWorkloadCondition, size)
+ for i := 0; i < size; i++ {
+ _elem := &values[i]
+ _elem.InitDefault()
+
+ if err := _elem.Read(iprot); err != nil {
+ return err
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.ConditionList = _field
+ return nil
+}
+func (p *TWorkloadSchedPolicy) ReadField7(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]*TWorkloadAction, 0, size)
+ values := make([]TWorkloadAction, size)
+ for i := 0; i < size; i++ {
+ _elem := &values[i]
+ _elem.InitDefault()
+
+ if err := _elem.Read(iprot); err != nil {
+ return err
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.ActionList = _field
+ return nil
+}
+func (p *TWorkloadSchedPolicy) ReadField8(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+
+ var _elem int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.WgIdList = _field
+ return nil
+}
+
+func (p *TWorkloadSchedPolicy) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TWorkloadSchedPolicy"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ if err = p.writeField5(oprot); err != nil {
+ fieldId = 5
+ goto WriteFieldError
+ }
+ if err = p.writeField6(oprot); err != nil {
+ fieldId = 6
+ goto WriteFieldError
+ }
+ if err = p.writeField7(oprot); err != nil {
+ fieldId = 7
+ goto WriteFieldError
+ }
+ if err = p.writeField8(oprot); err != nil {
+ fieldId = 8
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TWorkloadSchedPolicy) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetId() {
+ if err = oprot.WriteFieldBegin("id", thrift.I64, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.Id); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TWorkloadSchedPolicy) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetName() {
+ if err = oprot.WriteFieldBegin("name", thrift.STRING, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.Name); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TWorkloadSchedPolicy) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetVersion() {
+ if err = oprot.WriteFieldBegin("version", thrift.I32, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.Version); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *TWorkloadSchedPolicy) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetPriority() {
+ if err = oprot.WriteFieldBegin("priority", thrift.I32, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.Priority); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *TWorkloadSchedPolicy) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetEnabled() {
+ if err = oprot.WriteFieldBegin("enabled", thrift.BOOL, 5); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(*p.Enabled); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err)
+}
+
+func (p *TWorkloadSchedPolicy) writeField6(oprot thrift.TProtocol) (err error) {
+ if p.IsSetConditionList() {
+ if err = oprot.WriteFieldBegin("condition_list", thrift.LIST, 6); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ConditionList)); err != nil {
+ return err
+ }
+ for _, v := range p.ConditionList {
+ if err := v.Write(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err)
+}
+
+func (p *TWorkloadSchedPolicy) writeField7(oprot thrift.TProtocol) (err error) {
+ if p.IsSetActionList() {
+ if err = oprot.WriteFieldBegin("action_list", thrift.LIST, 7); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.ActionList)); err != nil {
+ return err
+ }
+ for _, v := range p.ActionList {
+ if err := v.Write(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 7 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err)
+}
+
+func (p *TWorkloadSchedPolicy) writeField8(oprot thrift.TProtocol) (err error) {
+ if p.IsSetWgIdList() {
+ if err = oprot.WriteFieldBegin("wg_id_list", thrift.LIST, 8); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.I64, len(p.WgIdList)); err != nil {
+ return err
+ }
+ for _, v := range p.WgIdList {
+ if err := oprot.WriteI64(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err)
+}
+
+func (p *TWorkloadSchedPolicy) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TWorkloadSchedPolicy(%+v)", *p)
+
+}
+
+func (p *TWorkloadSchedPolicy) DeepEqual(ano *TWorkloadSchedPolicy) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Id) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.Name) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.Version) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.Priority) {
+ return false
+ }
+ if !p.Field5DeepEqual(ano.Enabled) {
+ return false
+ }
+ if !p.Field6DeepEqual(ano.ConditionList) {
+ return false
+ }
+ if !p.Field7DeepEqual(ano.ActionList) {
+ return false
+ }
+ if !p.Field8DeepEqual(ano.WgIdList) {
+ return false
+ }
+ return true
+}
+
+func (p *TWorkloadSchedPolicy) Field1DeepEqual(src *int64) bool {
+
+ if p.Id == src {
+ return true
+ } else if p.Id == nil || src == nil {
+ return false
+ }
+ if *p.Id != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadSchedPolicy) Field2DeepEqual(src *string) bool {
+
+ if p.Name == src {
+ return true
+ } else if p.Name == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.Name, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadSchedPolicy) Field3DeepEqual(src *int32) bool {
+
+ if p.Version == src {
+ return true
+ } else if p.Version == nil || src == nil {
+ return false
+ }
+ if *p.Version != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadSchedPolicy) Field4DeepEqual(src *int32) bool {
+
+ if p.Priority == src {
+ return true
+ } else if p.Priority == nil || src == nil {
+ return false
+ }
+ if *p.Priority != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadSchedPolicy) Field5DeepEqual(src *bool) bool {
+
+ if p.Enabled == src {
+ return true
+ } else if p.Enabled == nil || src == nil {
+ return false
+ }
+ if *p.Enabled != *src {
+ return false
+ }
+ return true
+}
+func (p *TWorkloadSchedPolicy) Field6DeepEqual(src []*TWorkloadCondition) bool {
+
+ if len(p.ConditionList) != len(src) {
+ return false
+ }
+ for i, v := range p.ConditionList {
+ _src := src[i]
+ if !v.DeepEqual(_src) {
+ return false
+ }
+ }
+ return true
+}
+func (p *TWorkloadSchedPolicy) Field7DeepEqual(src []*TWorkloadAction) bool {
+
+ if len(p.ActionList) != len(src) {
+ return false
+ }
+ for i, v := range p.ActionList {
+ _src := src[i]
+ if !v.DeepEqual(_src) {
+ return false
+ }
+ }
+ return true
+}
+func (p *TWorkloadSchedPolicy) Field8DeepEqual(src []int64) bool {
+
+ if len(p.WgIdList) != len(src) {
+ return false
+ }
+ for i, v := range p.WgIdList {
+ _src := src[i]
+ if v != _src {
+ return false
+ }
+ }
+ return true
+}
+
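+// TopicInfo is a single published entry: either a workload group info or a
+// workload schedule policy (both optional).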
+type TopicInfo struct {
+ WorkloadGroupInfo *TWorkloadGroupInfo `thrift:"workload_group_info,1,optional" frugal:"1,optional,TWorkloadGroupInfo" json:"workload_group_info,omitempty"`
+ WorkloadSchedPolicy *TWorkloadSchedPolicy `thrift:"workload_sched_policy,2,optional" frugal:"2,optional,TWorkloadSchedPolicy" json:"workload_sched_policy,omitempty"`
+}
+
+func NewTopicInfo() *TopicInfo {
+ return &TopicInfo{}
+}
+
+func (p *TopicInfo) InitDefault() {
+}
+
+var TopicInfo_WorkloadGroupInfo_DEFAULT *TWorkloadGroupInfo
+
+func (p *TopicInfo) GetWorkloadGroupInfo() (v *TWorkloadGroupInfo) {
+ if !p.IsSetWorkloadGroupInfo() {
+ return TopicInfo_WorkloadGroupInfo_DEFAULT
+ }
+ return p.WorkloadGroupInfo
+}
+
+var TopicInfo_WorkloadSchedPolicy_DEFAULT *TWorkloadSchedPolicy
+
+func (p *TopicInfo) GetWorkloadSchedPolicy() (v *TWorkloadSchedPolicy) {
+ if !p.IsSetWorkloadSchedPolicy() {
+ return TopicInfo_WorkloadSchedPolicy_DEFAULT
+ }
+ return p.WorkloadSchedPolicy
+}
+func (p *TopicInfo) SetWorkloadGroupInfo(val *TWorkloadGroupInfo) {
+ p.WorkloadGroupInfo = val
+}
+func (p *TopicInfo) SetWorkloadSchedPolicy(val *TWorkloadSchedPolicy) {
+ p.WorkloadSchedPolicy = val
+}
+
+var fieldIDToName_TopicInfo = map[int16]string{
+ 1: "workload_group_info",
+ 2: "workload_sched_policy",
+}
+
+func (p *TopicInfo) IsSetWorkloadGroupInfo() bool {
+ return p.WorkloadGroupInfo != nil
+}
+
+func (p *TopicInfo) IsSetWorkloadSchedPolicy() bool {
+ return p.WorkloadSchedPolicy != nil
+}
+
+func (p *TopicInfo) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TopicInfo[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TopicInfo) ReadField1(iprot thrift.TProtocol) error {
+ _field := NewTWorkloadGroupInfo()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.WorkloadGroupInfo = _field
+ return nil
+}
+func (p *TopicInfo) ReadField2(iprot thrift.TProtocol) error {
+ _field := NewTWorkloadSchedPolicy()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.WorkloadSchedPolicy = _field
+ return nil
+}
+
+func (p *TopicInfo) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TopicInfo"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TopicInfo) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetWorkloadGroupInfo() {
+ if err = oprot.WriteFieldBegin("workload_group_info", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.WorkloadGroupInfo.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TopicInfo) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetWorkloadSchedPolicy() {
+ if err = oprot.WriteFieldBegin("workload_sched_policy", thrift.STRUCT, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.WorkloadSchedPolicy.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TopicInfo) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TopicInfo(%+v)", *p)
+
+}
+
+func (p *TopicInfo) DeepEqual(ano *TopicInfo) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.WorkloadGroupInfo) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.WorkloadSchedPolicy) {
+ return false
+ }
+ return true
+}
+
+func (p *TopicInfo) Field1DeepEqual(src *TWorkloadGroupInfo) bool {
+
+ if !p.WorkloadGroupInfo.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+func (p *TopicInfo) Field2DeepEqual(src *TWorkloadSchedPolicy) bool {
+
+ if !p.WorkloadSchedPolicy.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+
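+// TPublishTopicRequest carries the required topic_map, keyed by topic type,
+// with the list of TopicInfo entries to publish for each type.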
+type TPublishTopicRequest struct {
+ TopicMap map[TTopicInfoType][]*TopicInfo `thrift:"topic_map,1,required" frugal:"1,required,map>" json:"topic_map"`
+}
+
+func NewTPublishTopicRequest() *TPublishTopicRequest {
+ return &TPublishTopicRequest{}
+}
+
+func (p *TPublishTopicRequest) InitDefault() {
+}
+
+func (p *TPublishTopicRequest) GetTopicMap() (v map[TTopicInfoType][]*TopicInfo) {
+ return p.TopicMap
+}
+func (p *TPublishTopicRequest) SetTopicMap(val map[TTopicInfoType][]*TopicInfo) {
+ p.TopicMap = val
+}
+
+var fieldIDToName_TPublishTopicRequest = map[int16]string{
+ 1: "topic_map",
+}
+
+func (p *TPublishTopicRequest) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetTopicMap bool = false
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.MAP {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetTopicMap = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetTopicMap {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishTopicRequest[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishTopicRequest[fieldId]))
+}
+
+func (p *TPublishTopicRequest) ReadField1(iprot thrift.TProtocol) error {
+ _, _, size, err := iprot.ReadMapBegin()
+ if err != nil {
+ return err
+ }
+ _field := make(map[TTopicInfoType][]*TopicInfo, size)
+ for i := 0; i < size; i++ {
+ var _key TTopicInfoType
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _key = TTopicInfoType(v)
+ }
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _val := make([]*TopicInfo, 0, size)
+ values := make([]TopicInfo, size)
+ for i := 0; i < size; i++ {
+ _elem := &values[i]
+ _elem.InitDefault()
+
+ if err := _elem.Read(iprot); err != nil {
+ return err
+ }
+
+ _val = append(_val, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+
+ _field[_key] = _val
+ }
+ if err := iprot.ReadMapEnd(); err != nil {
+ return err
+ }
+ p.TopicMap = _field
+ return nil
+}
+
+func (p *TPublishTopicRequest) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TPublishTopicRequest"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TPublishTopicRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("topic_map", thrift.MAP, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteMapBegin(thrift.I32, thrift.LIST, len(p.TopicMap)); err != nil {
+ return err
+ }
+ for k, v := range p.TopicMap {
+ if err := oprot.WriteI32(int32(k)); err != nil {
+ return err
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(v)); err != nil {
+ return err
+ }
+ for _, v := range v {
+ if err := v.Write(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteMapEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TPublishTopicRequest) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TPublishTopicRequest(%+v)", *p)
+
+}
+
+func (p *TPublishTopicRequest) DeepEqual(ano *TPublishTopicRequest) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.TopicMap) {
+ return false
+ }
+ return true
+}
+
+func (p *TPublishTopicRequest) Field1DeepEqual(src map[TTopicInfoType][]*TopicInfo) bool {
+
+ if len(p.TopicMap) != len(src) {
+ return false
+ }
+ for k, v := range p.TopicMap {
+ _src := src[k]
+ if len(v) != len(_src) {
+ return false
+ }
+ for i, v := range v {
+ _src1 := _src[i]
+ if !v.DeepEqual(_src1) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
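+// TPublishTopicResult_ wraps the required TStatus returned by PublishTopicInfo.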
+type TPublishTopicResult_ struct {
+ Status *status.TStatus `thrift:"status,1,required" frugal:"1,required,status.TStatus" json:"status"`
+}
+
+func NewTPublishTopicResult_() *TPublishTopicResult_ {
+ return &TPublishTopicResult_{}
+}
+
+func (p *TPublishTopicResult_) InitDefault() {
+}
+
+var TPublishTopicResult__Status_DEFAULT *status.TStatus
+
+func (p *TPublishTopicResult_) GetStatus() (v *status.TStatus) {
+ if !p.IsSetStatus() {
+ return TPublishTopicResult__Status_DEFAULT
+ }
+ return p.Status
+}
+func (p *TPublishTopicResult_) SetStatus(val *status.TStatus) {
+ p.Status = val
+}
+
+var fieldIDToName_TPublishTopicResult_ = map[int16]string{
+ 1: "status",
+}
+
+func (p *TPublishTopicResult_) IsSetStatus() bool {
+ return p.Status != nil
+}
+
+func (p *TPublishTopicResult_) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetStatus bool = false
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ issetStatus = true
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetStatus {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishTopicResult_[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishTopicResult_[fieldId]))
+}
+
+func (p *TPublishTopicResult_) ReadField1(iprot thrift.TProtocol) error {
+ _field := status.NewTStatus()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Status = _field
+ return nil
+}
+
+func (p *TPublishTopicResult_) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TPublishTopicResult"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TPublishTopicResult_) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Status.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TPublishTopicResult_) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TPublishTopicResult_(%+v)", *p)
+
+}
+
+func (p *TPublishTopicResult_) DeepEqual(ano *TPublishTopicResult_) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Status) {
+ return false
+ }
+ return true
+}
+
+func (p *TPublishTopicResult_) Field1DeepEqual(src *status.TStatus) bool {
+
+ if !p.Status.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+
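+// TGetRealtimeExecStatusRequest identifies, by query id, the query whose
+// realtime execution status is requested.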
+type TGetRealtimeExecStatusRequest struct {
+ Id *types.TUniqueId `thrift:"id,1,optional" frugal:"1,optional,types.TUniqueId" json:"id,omitempty"`
+}
+
+func NewTGetRealtimeExecStatusRequest() *TGetRealtimeExecStatusRequest {
+ return &TGetRealtimeExecStatusRequest{}
+}
+
+func (p *TGetRealtimeExecStatusRequest) InitDefault() {
+}
+
+var TGetRealtimeExecStatusRequest_Id_DEFAULT *types.TUniqueId
+
+func (p *TGetRealtimeExecStatusRequest) GetId() (v *types.TUniqueId) {
+ if !p.IsSetId() {
+ return TGetRealtimeExecStatusRequest_Id_DEFAULT
+ }
+ return p.Id
+}
+func (p *TGetRealtimeExecStatusRequest) SetId(val *types.TUniqueId) {
+ p.Id = val
+}
+
+var fieldIDToName_TGetRealtimeExecStatusRequest = map[int16]string{
+ 1: "id",
+}
+
+func (p *TGetRealtimeExecStatusRequest) IsSetId() bool {
+ return p.Id != nil
+}
+
+func (p *TGetRealtimeExecStatusRequest) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetRealtimeExecStatusRequest[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TGetRealtimeExecStatusRequest) ReadField1(iprot thrift.TProtocol) error {
+ _field := types.NewTUniqueId()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Id = _field
+ return nil
+}
+
+func (p *TGetRealtimeExecStatusRequest) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TGetRealtimeExecStatusRequest"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TGetRealtimeExecStatusRequest) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetId() {
+ if err = oprot.WriteFieldBegin("id", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Id.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TGetRealtimeExecStatusRequest) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TGetRealtimeExecStatusRequest(%+v)", *p)
+
+}
+
+func (p *TGetRealtimeExecStatusRequest) DeepEqual(ano *TGetRealtimeExecStatusRequest) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Id) {
+ return false
+ }
+ return true
+}
+
+func (p *TGetRealtimeExecStatusRequest) Field1DeepEqual(src *types.TUniqueId) bool {
+
+ if !p.Id.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+
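+// TGetRealtimeExecStatusResponse returns the RPC status and, when available,
+// a TReportExecStatusParams payload describing the current execution state.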
+type TGetRealtimeExecStatusResponse struct {
+ Status *status.TStatus `thrift:"status,1,optional" frugal:"1,optional,status.TStatus" json:"status,omitempty"`
+ ReportExecStatusParams *frontendservice.TReportExecStatusParams `thrift:"report_exec_status_params,2,optional" frugal:"2,optional,frontendservice.TReportExecStatusParams" json:"report_exec_status_params,omitempty"`
+}
+
+func NewTGetRealtimeExecStatusResponse() *TGetRealtimeExecStatusResponse {
+ return &TGetRealtimeExecStatusResponse{}
+}
+
+func (p *TGetRealtimeExecStatusResponse) InitDefault() {
+}
+
+var TGetRealtimeExecStatusResponse_Status_DEFAULT *status.TStatus
+
+func (p *TGetRealtimeExecStatusResponse) GetStatus() (v *status.TStatus) {
+ if !p.IsSetStatus() {
+ return TGetRealtimeExecStatusResponse_Status_DEFAULT
+ }
+ return p.Status
+}
+
+var TGetRealtimeExecStatusResponse_ReportExecStatusParams_DEFAULT *frontendservice.TReportExecStatusParams
+
+func (p *TGetRealtimeExecStatusResponse) GetReportExecStatusParams() (v *frontendservice.TReportExecStatusParams) {
+ if !p.IsSetReportExecStatusParams() {
+ return TGetRealtimeExecStatusResponse_ReportExecStatusParams_DEFAULT
+ }
+ return p.ReportExecStatusParams
+}
+func (p *TGetRealtimeExecStatusResponse) SetStatus(val *status.TStatus) {
+ p.Status = val
+}
+func (p *TGetRealtimeExecStatusResponse) SetReportExecStatusParams(val *frontendservice.TReportExecStatusParams) {
+ p.ReportExecStatusParams = val
+}
+
+var fieldIDToName_TGetRealtimeExecStatusResponse = map[int16]string{
+ 1: "status",
+ 2: "report_exec_status_params",
+}
+
+func (p *TGetRealtimeExecStatusResponse) IsSetStatus() bool {
+ return p.Status != nil
+}
+
+func (p *TGetRealtimeExecStatusResponse) IsSetReportExecStatusParams() bool {
+ return p.ReportExecStatusParams != nil
+}
+
+func (p *TGetRealtimeExecStatusResponse) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetRealtimeExecStatusResponse[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TGetRealtimeExecStatusResponse) ReadField1(iprot thrift.TProtocol) error {
+ _field := status.NewTStatus()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Status = _field
+ return nil
+}
+func (p *TGetRealtimeExecStatusResponse) ReadField2(iprot thrift.TProtocol) error {
+ _field := frontendservice.NewTReportExecStatusParams()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.ReportExecStatusParams = _field
+ return nil
+}
+
+func (p *TGetRealtimeExecStatusResponse) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TGetRealtimeExecStatusResponse"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TGetRealtimeExecStatusResponse) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetStatus() {
+ if err = oprot.WriteFieldBegin("status", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Status.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TGetRealtimeExecStatusResponse) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetReportExecStatusParams() {
+ if err = oprot.WriteFieldBegin("report_exec_status_params", thrift.STRUCT, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.ReportExecStatusParams.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TGetRealtimeExecStatusResponse) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TGetRealtimeExecStatusResponse(%+v)", *p)
+
+}
+
+func (p *TGetRealtimeExecStatusResponse) DeepEqual(ano *TGetRealtimeExecStatusResponse) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Status) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.ReportExecStatusParams) {
+ return false
+ }
+ return true
+}
+
+func (p *TGetRealtimeExecStatusResponse) Field1DeepEqual(src *status.TStatus) bool {
+
+ if !p.Status.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+func (p *TGetRealtimeExecStatusResponse) Field2DeepEqual(src *frontendservice.TReportExecStatusParams) bool {
+
+ if !p.ReportExecStatusParams.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+
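+// BackendService enumerates the backend (BE) RPCs reachable over thrift,
+// including PublishTopicInfo for pushing topic updates (e.g. workload groups
+// and schedule policies) and GetRealtimeExecStatus for querying live query
+// execution status.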
+type BackendService interface {
+ ExecPlanFragment(ctx context.Context, params *palointernalservice.TExecPlanFragmentParams) (r *palointernalservice.TExecPlanFragmentResult_, err error)
+
+ CancelPlanFragment(ctx context.Context, params *palointernalservice.TCancelPlanFragmentParams) (r *palointernalservice.TCancelPlanFragmentResult_, err error)
+
+ TransmitData(ctx context.Context, params *palointernalservice.TTransmitDataParams) (r *palointernalservice.TTransmitDataResult_, err error)
+
+ SubmitTasks(ctx context.Context, tasks []*agentservice.TAgentTaskRequest) (r *agentservice.TAgentResult_, err error)
+
+ MakeSnapshot(ctx context.Context, snapshotRequest *agentservice.TSnapshotRequest) (r *agentservice.TAgentResult_, err error)
+
+ ReleaseSnapshot(ctx context.Context, snapshotPath string) (r *agentservice.TAgentResult_, err error)
+
+ PublishClusterState(ctx context.Context, request *agentservice.TAgentPublishRequest) (r *agentservice.TAgentResult_, err error)
+
+ SubmitExportTask(ctx context.Context, request *TExportTaskRequest) (r *status.TStatus, err error)
+
+ GetExportStatus(ctx context.Context, taskId *types.TUniqueId) (r *palointernalservice.TExportStatusResult_, err error)
+
+ EraseExportTask(ctx context.Context, taskId *types.TUniqueId) (r *status.TStatus, err error)
+
+ GetTabletStat(ctx context.Context) (r *TTabletStatResult_, err error)
+
+ GetTrashUsedCapacity(ctx context.Context) (r int64, err error)
+
+ GetDiskTrashUsedCapacity(ctx context.Context) (r []*TDiskTrashInfo, err error)
+
+ SubmitRoutineLoadTask(ctx context.Context, tasks []*TRoutineLoadTask) (r *status.TStatus, err error)
+
+ OpenScanner(ctx context.Context, params *dorisexternalservice.TScanOpenParams) (r *dorisexternalservice.TScanOpenResult_, err error)
+
+ GetNext(ctx context.Context, params *dorisexternalservice.TScanNextBatchParams) (r *dorisexternalservice.TScanBatchResult_, err error)
+
+ CloseScanner(ctx context.Context, params *dorisexternalservice.TScanCloseParams) (r *dorisexternalservice.TScanCloseResult_, err error)
+
+ GetStreamLoadRecord(ctx context.Context, lastStreamRecordTime int64) (r *TStreamLoadRecordResult_, err error)
+
+ CheckStorageFormat(ctx context.Context) (r *TCheckStorageFormatResult_, err error)
+
+ WarmUpCacheAsync(ctx context.Context, request *TWarmUpCacheAsyncRequest) (r *TWarmUpCacheAsyncResponse, err error)
+
+ CheckWarmUpCacheAsync(ctx context.Context, request *TCheckWarmUpCacheAsyncRequest) (r *TCheckWarmUpCacheAsyncResponse, err error)
+
+ SyncLoadForTablets(ctx context.Context, request *TSyncLoadForTabletsRequest) (r *TSyncLoadForTabletsResponse, err error)
+
+ GetTopNHotPartitions(ctx context.Context, request *TGetTopNHotPartitionsRequest) (r *TGetTopNHotPartitionsResponse, err error)
+
+ WarmUpTablets(ctx context.Context, request *TWarmUpTabletsRequest) (r *TWarmUpTabletsResponse, err error)
+
+ IngestBinlog(ctx context.Context, ingestBinlogRequest *TIngestBinlogRequest) (r *TIngestBinlogResult_, err error)
+
+ QueryIngestBinlog(ctx context.Context, queryIngestBinlogRequest *TQueryIngestBinlogRequest) (r *TQueryIngestBinlogResult_, err error)
+
+ PublishTopicInfo(ctx context.Context, topicRequest *TPublishTopicRequest) (r *TPublishTopicResult_, err error)
+
+ GetRealtimeExecStatus(ctx context.Context, request *TGetRealtimeExecStatusRequest) (r *TGetRealtimeExecStatusResponse, err error)
+}
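+
+// The BackendServiceClient below satisfies BackendService on top of a raw
+// thrift.TClient, while the BackendServiceProcessor further down adapts a
+// user-supplied BackendService implementation to incoming thrift messages.
+// (Descriptive comment added for readability; not emitted by the generator.)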
+
+type BackendServiceClient struct {
+ c thrift.TClient
+}
+
+func NewBackendServiceClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BackendServiceClient {
+ return &BackendServiceClient{
+ c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+ }
+}
+
+func NewBackendServiceClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BackendServiceClient {
+ return &BackendServiceClient{
+ c: thrift.NewTStandardClient(iprot, oprot),
+ }
+}
+
+func NewBackendServiceClient(c thrift.TClient) *BackendServiceClient {
+ return &BackendServiceClient{
+ c: c,
+ }
+}
+
+func (p *BackendServiceClient) Client_() thrift.TClient {
+ return p.c
+}
+
+func (p *BackendServiceClient) ExecPlanFragment(ctx context.Context, params *palointernalservice.TExecPlanFragmentParams) (r *palointernalservice.TExecPlanFragmentResult_, err error) {
+ var _args BackendServiceExecPlanFragmentArgs
+ _args.Params = params
+ var _result BackendServiceExecPlanFragmentResult
+ if err = p.Client_().Call(ctx, "exec_plan_fragment", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) CancelPlanFragment(ctx context.Context, params *palointernalservice.TCancelPlanFragmentParams) (r *palointernalservice.TCancelPlanFragmentResult_, err error) {
+ var _args BackendServiceCancelPlanFragmentArgs
+ _args.Params = params
+ var _result BackendServiceCancelPlanFragmentResult
+ if err = p.Client_().Call(ctx, "cancel_plan_fragment", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) TransmitData(ctx context.Context, params *palointernalservice.TTransmitDataParams) (r *palointernalservice.TTransmitDataResult_, err error) {
+ var _args BackendServiceTransmitDataArgs
+ _args.Params = params
+ var _result BackendServiceTransmitDataResult
+ if err = p.Client_().Call(ctx, "transmit_data", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) SubmitTasks(ctx context.Context, tasks []*agentservice.TAgentTaskRequest) (r *agentservice.TAgentResult_, err error) {
+ var _args BackendServiceSubmitTasksArgs
+ _args.Tasks = tasks
+ var _result BackendServiceSubmitTasksResult
+ if err = p.Client_().Call(ctx, "submit_tasks", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) MakeSnapshot(ctx context.Context, snapshotRequest *agentservice.TSnapshotRequest) (r *agentservice.TAgentResult_, err error) {
+ var _args BackendServiceMakeSnapshotArgs
+ _args.SnapshotRequest = snapshotRequest
+ var _result BackendServiceMakeSnapshotResult
+ if err = p.Client_().Call(ctx, "make_snapshot", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) ReleaseSnapshot(ctx context.Context, snapshotPath string) (r *agentservice.TAgentResult_, err error) {
+ var _args BackendServiceReleaseSnapshotArgs
+ _args.SnapshotPath = snapshotPath
+ var _result BackendServiceReleaseSnapshotResult
+ if err = p.Client_().Call(ctx, "release_snapshot", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) PublishClusterState(ctx context.Context, request *agentservice.TAgentPublishRequest) (r *agentservice.TAgentResult_, err error) {
+ var _args BackendServicePublishClusterStateArgs
+ _args.Request = request
+ var _result BackendServicePublishClusterStateResult
+ if err = p.Client_().Call(ctx, "publish_cluster_state", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) SubmitExportTask(ctx context.Context, request *TExportTaskRequest) (r *status.TStatus, err error) {
+ var _args BackendServiceSubmitExportTaskArgs
+ _args.Request = request
+ var _result BackendServiceSubmitExportTaskResult
+ if err = p.Client_().Call(ctx, "submit_export_task", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) GetExportStatus(ctx context.Context, taskId *types.TUniqueId) (r *palointernalservice.TExportStatusResult_, err error) {
+ var _args BackendServiceGetExportStatusArgs
+ _args.TaskId = taskId
+ var _result BackendServiceGetExportStatusResult
+ if err = p.Client_().Call(ctx, "get_export_status", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) EraseExportTask(ctx context.Context, taskId *types.TUniqueId) (r *status.TStatus, err error) {
+ var _args BackendServiceEraseExportTaskArgs
+ _args.TaskId = taskId
+ var _result BackendServiceEraseExportTaskResult
+ if err = p.Client_().Call(ctx, "erase_export_task", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) GetTabletStat(ctx context.Context) (r *TTabletStatResult_, err error) {
+ var _args BackendServiceGetTabletStatArgs
+ var _result BackendServiceGetTabletStatResult
+ if err = p.Client_().Call(ctx, "get_tablet_stat", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) GetTrashUsedCapacity(ctx context.Context) (r int64, err error) {
+ var _args BackendServiceGetTrashUsedCapacityArgs
+ var _result BackendServiceGetTrashUsedCapacityResult
+ if err = p.Client_().Call(ctx, "get_trash_used_capacity", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) GetDiskTrashUsedCapacity(ctx context.Context) (r []*TDiskTrashInfo, err error) {
+ var _args BackendServiceGetDiskTrashUsedCapacityArgs
+ var _result BackendServiceGetDiskTrashUsedCapacityResult
+ if err = p.Client_().Call(ctx, "get_disk_trash_used_capacity", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) SubmitRoutineLoadTask(ctx context.Context, tasks []*TRoutineLoadTask) (r *status.TStatus, err error) {
+ var _args BackendServiceSubmitRoutineLoadTaskArgs
+ _args.Tasks = tasks
+ var _result BackendServiceSubmitRoutineLoadTaskResult
+ if err = p.Client_().Call(ctx, "submit_routine_load_task", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) OpenScanner(ctx context.Context, params *dorisexternalservice.TScanOpenParams) (r *dorisexternalservice.TScanOpenResult_, err error) {
+ var _args BackendServiceOpenScannerArgs
+ _args.Params = params
+ var _result BackendServiceOpenScannerResult
+ if err = p.Client_().Call(ctx, "open_scanner", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) GetNext(ctx context.Context, params *dorisexternalservice.TScanNextBatchParams) (r *dorisexternalservice.TScanBatchResult_, err error) {
+ var _args BackendServiceGetNextArgs
+ _args.Params = params
+ var _result BackendServiceGetNextResult
+ if err = p.Client_().Call(ctx, "get_next", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) CloseScanner(ctx context.Context, params *dorisexternalservice.TScanCloseParams) (r *dorisexternalservice.TScanCloseResult_, err error) {
+ var _args BackendServiceCloseScannerArgs
+ _args.Params = params
+ var _result BackendServiceCloseScannerResult
+ if err = p.Client_().Call(ctx, "close_scanner", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) GetStreamLoadRecord(ctx context.Context, lastStreamRecordTime int64) (r *TStreamLoadRecordResult_, err error) {
var _args BackendServiceGetStreamLoadRecordArgs
_args.LastStreamRecordTime = lastStreamRecordTime
var _result BackendServiceGetStreamLoadRecordResult
if err = p.Client_().Call(ctx, "get_stream_load_record", &_args, &_result); err != nil {
return
}
- return _result.GetSuccess(), nil
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) CheckStorageFormat(ctx context.Context) (r *TCheckStorageFormatResult_, err error) {
+ var _args BackendServiceCheckStorageFormatArgs
+ var _result BackendServiceCheckStorageFormatResult
+ if err = p.Client_().Call(ctx, "check_storage_format", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) WarmUpCacheAsync(ctx context.Context, request *TWarmUpCacheAsyncRequest) (r *TWarmUpCacheAsyncResponse, err error) {
+ var _args BackendServiceWarmUpCacheAsyncArgs
+ _args.Request = request
+ var _result BackendServiceWarmUpCacheAsyncResult
+ if err = p.Client_().Call(ctx, "warm_up_cache_async", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) CheckWarmUpCacheAsync(ctx context.Context, request *TCheckWarmUpCacheAsyncRequest) (r *TCheckWarmUpCacheAsyncResponse, err error) {
+ var _args BackendServiceCheckWarmUpCacheAsyncArgs
+ _args.Request = request
+ var _result BackendServiceCheckWarmUpCacheAsyncResult
+ if err = p.Client_().Call(ctx, "check_warm_up_cache_async", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) SyncLoadForTablets(ctx context.Context, request *TSyncLoadForTabletsRequest) (r *TSyncLoadForTabletsResponse, err error) {
+ var _args BackendServiceSyncLoadForTabletsArgs
+ _args.Request = request
+ var _result BackendServiceSyncLoadForTabletsResult
+ if err = p.Client_().Call(ctx, "sync_load_for_tablets", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) GetTopNHotPartitions(ctx context.Context, request *TGetTopNHotPartitionsRequest) (r *TGetTopNHotPartitionsResponse, err error) {
+ var _args BackendServiceGetTopNHotPartitionsArgs
+ _args.Request = request
+ var _result BackendServiceGetTopNHotPartitionsResult
+ if err = p.Client_().Call(ctx, "get_top_n_hot_partitions", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) WarmUpTablets(ctx context.Context, request *TWarmUpTabletsRequest) (r *TWarmUpTabletsResponse, err error) {
+ var _args BackendServiceWarmUpTabletsArgs
+ _args.Request = request
+ var _result BackendServiceWarmUpTabletsResult
+ if err = p.Client_().Call(ctx, "warm_up_tablets", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) IngestBinlog(ctx context.Context, ingestBinlogRequest *TIngestBinlogRequest) (r *TIngestBinlogResult_, err error) {
+ var _args BackendServiceIngestBinlogArgs
+ _args.IngestBinlogRequest = ingestBinlogRequest
+ var _result BackendServiceIngestBinlogResult
+ if err = p.Client_().Call(ctx, "ingest_binlog", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) QueryIngestBinlog(ctx context.Context, queryIngestBinlogRequest *TQueryIngestBinlogRequest) (r *TQueryIngestBinlogResult_, err error) {
+ var _args BackendServiceQueryIngestBinlogArgs
+ _args.QueryIngestBinlogRequest = queryIngestBinlogRequest
+ var _result BackendServiceQueryIngestBinlogResult
+ if err = p.Client_().Call(ctx, "query_ingest_binlog", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) PublishTopicInfo(ctx context.Context, topicRequest *TPublishTopicRequest) (r *TPublishTopicResult_, err error) {
+ var _args BackendServicePublishTopicInfoArgs
+ _args.TopicRequest = topicRequest
+ var _result BackendServicePublishTopicInfoResult
+ if err = p.Client_().Call(ctx, "publish_topic_info", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+func (p *BackendServiceClient) GetRealtimeExecStatus(ctx context.Context, request *TGetRealtimeExecStatusRequest) (r *TGetRealtimeExecStatusResponse, err error) {
+ var _args BackendServiceGetRealtimeExecStatusArgs
+ _args.Request = request
+ var _result BackendServiceGetRealtimeExecStatusResult
+ if err = p.Client_().Call(ctx, "get_realtime_exec_status", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
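+
+// exampleIngestBinlogOnce is an illustrative sketch added alongside the
+// generated code (it is not part of the generator's output). It assumes the
+// caller already holds a connected thrift.TClient; transport and protocol
+// setup, retries, and pooling are deliberately out of scope here.
+func exampleIngestBinlogOnce(ctx context.Context, c thrift.TClient, req *TIngestBinlogRequest) (*TIngestBinlogResult_, error) {
+	// Wrap the raw client; every method on BackendServiceClient goes through
+	// Client_().Call with the wire-level method name, e.g. "ingest_binlog".
+	client := NewBackendServiceClient(c)
+	return client.IngestBinlog(ctx, req)
+}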
+
+type BackendServiceProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler BackendService
+}
+
+func (p *BackendServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *BackendServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *BackendServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewBackendServiceProcessor(handler BackendService) *BackendServiceProcessor {
+ self := &BackendServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+ self.AddToProcessorMap("exec_plan_fragment", &backendServiceProcessorExecPlanFragment{handler: handler})
+ self.AddToProcessorMap("cancel_plan_fragment", &backendServiceProcessorCancelPlanFragment{handler: handler})
+ self.AddToProcessorMap("transmit_data", &backendServiceProcessorTransmitData{handler: handler})
+ self.AddToProcessorMap("submit_tasks", &backendServiceProcessorSubmitTasks{handler: handler})
+ self.AddToProcessorMap("make_snapshot", &backendServiceProcessorMakeSnapshot{handler: handler})
+ self.AddToProcessorMap("release_snapshot", &backendServiceProcessorReleaseSnapshot{handler: handler})
+ self.AddToProcessorMap("publish_cluster_state", &backendServiceProcessorPublishClusterState{handler: handler})
+ self.AddToProcessorMap("submit_export_task", &backendServiceProcessorSubmitExportTask{handler: handler})
+ self.AddToProcessorMap("get_export_status", &backendServiceProcessorGetExportStatus{handler: handler})
+ self.AddToProcessorMap("erase_export_task", &backendServiceProcessorEraseExportTask{handler: handler})
+ self.AddToProcessorMap("get_tablet_stat", &backendServiceProcessorGetTabletStat{handler: handler})
+ self.AddToProcessorMap("get_trash_used_capacity", &backendServiceProcessorGetTrashUsedCapacity{handler: handler})
+ self.AddToProcessorMap("get_disk_trash_used_capacity", &backendServiceProcessorGetDiskTrashUsedCapacity{handler: handler})
+ self.AddToProcessorMap("submit_routine_load_task", &backendServiceProcessorSubmitRoutineLoadTask{handler: handler})
+ self.AddToProcessorMap("open_scanner", &backendServiceProcessorOpenScanner{handler: handler})
+ self.AddToProcessorMap("get_next", &backendServiceProcessorGetNext{handler: handler})
+ self.AddToProcessorMap("close_scanner", &backendServiceProcessorCloseScanner{handler: handler})
+ self.AddToProcessorMap("get_stream_load_record", &backendServiceProcessorGetStreamLoadRecord{handler: handler})
+ self.AddToProcessorMap("check_storage_format", &backendServiceProcessorCheckStorageFormat{handler: handler})
+ self.AddToProcessorMap("warm_up_cache_async", &backendServiceProcessorWarmUpCacheAsync{handler: handler})
+ self.AddToProcessorMap("check_warm_up_cache_async", &backendServiceProcessorCheckWarmUpCacheAsync{handler: handler})
+ self.AddToProcessorMap("sync_load_for_tablets", &backendServiceProcessorSyncLoadForTablets{handler: handler})
+ self.AddToProcessorMap("get_top_n_hot_partitions", &backendServiceProcessorGetTopNHotPartitions{handler: handler})
+ self.AddToProcessorMap("warm_up_tablets", &backendServiceProcessorWarmUpTablets{handler: handler})
+ self.AddToProcessorMap("ingest_binlog", &backendServiceProcessorIngestBinlog{handler: handler})
+ self.AddToProcessorMap("query_ingest_binlog", &backendServiceProcessorQueryIngestBinlog{handler: handler})
+ self.AddToProcessorMap("publish_topic_info", &backendServiceProcessorPublishTopicInfo{handler: handler})
+ self.AddToProcessorMap("get_realtime_exec_status", &backendServiceProcessorGetRealtimeExecStatus{handler: handler})
+ return self
+}
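+
+// Dispatch note (descriptive comment added for readability; not generator
+// output): Process reads the next message name from iprot, looks it up in
+// the processorMap populated by NewBackendServiceProcessor with the wire
+// names registered above, and delegates to the matching per-method
+// processor. Unknown names are skipped on the input protocol and answered
+// with a TApplicationException(UNKNOWN_METHOD).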
+func (p *BackendServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err := iprot.ReadMessageBegin()
+ if err != nil {
+ return false, err
+ }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(ctx, seqId, iprot, oprot)
+ }
+ iprot.Skip(thrift.STRUCT)
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
+ oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, x
+}
+
+type backendServiceProcessorExecPlanFragment struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorExecPlanFragment) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceExecPlanFragmentArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("exec_plan_fragment", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceExecPlanFragmentResult{}
+ var retval *palointernalservice.TExecPlanFragmentResult_
+ if retval, err2 = p.handler.ExecPlanFragment(ctx, args.Params); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing exec_plan_fragment: "+err2.Error())
+ oprot.WriteMessageBegin("exec_plan_fragment", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("exec_plan_fragment", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorCancelPlanFragment struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorCancelPlanFragment) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceCancelPlanFragmentArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("cancel_plan_fragment", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceCancelPlanFragmentResult{}
+ var retval *palointernalservice.TCancelPlanFragmentResult_
+ if retval, err2 = p.handler.CancelPlanFragment(ctx, args.Params); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing cancel_plan_fragment: "+err2.Error())
+ oprot.WriteMessageBegin("cancel_plan_fragment", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("cancel_plan_fragment", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorTransmitData struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorTransmitData) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceTransmitDataArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("transmit_data", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceTransmitDataResult{}
+ var retval *palointernalservice.TTransmitDataResult_
+ if retval, err2 = p.handler.TransmitData(ctx, args.Params); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing transmit_data: "+err2.Error())
+ oprot.WriteMessageBegin("transmit_data", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("transmit_data", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorSubmitTasks struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorSubmitTasks) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceSubmitTasksArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("submit_tasks", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceSubmitTasksResult{}
+ var retval *agentservice.TAgentResult_
+ if retval, err2 = p.handler.SubmitTasks(ctx, args.Tasks); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submit_tasks: "+err2.Error())
+ oprot.WriteMessageBegin("submit_tasks", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("submit_tasks", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorMakeSnapshot struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorMakeSnapshot) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceMakeSnapshotArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("make_snapshot", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceMakeSnapshotResult{}
+ var retval *agentservice.TAgentResult_
+ if retval, err2 = p.handler.MakeSnapshot(ctx, args.SnapshotRequest); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing make_snapshot: "+err2.Error())
+ oprot.WriteMessageBegin("make_snapshot", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("make_snapshot", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorReleaseSnapshot struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorReleaseSnapshot) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceReleaseSnapshotArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("release_snapshot", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceReleaseSnapshotResult{}
+ var retval *agentservice.TAgentResult_
+ if retval, err2 = p.handler.ReleaseSnapshot(ctx, args.SnapshotPath); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing release_snapshot: "+err2.Error())
+ oprot.WriteMessageBegin("release_snapshot", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("release_snapshot", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorPublishClusterState struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorPublishClusterState) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServicePublishClusterStateArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("publish_cluster_state", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServicePublishClusterStateResult{}
+ var retval *agentservice.TAgentResult_
+ if retval, err2 = p.handler.PublishClusterState(ctx, args.Request); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing publish_cluster_state: "+err2.Error())
+ oprot.WriteMessageBegin("publish_cluster_state", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("publish_cluster_state", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorSubmitExportTask struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorSubmitExportTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceSubmitExportTaskArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("submit_export_task", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceSubmitExportTaskResult{}
+ var retval *status.TStatus
+ if retval, err2 = p.handler.SubmitExportTask(ctx, args.Request); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submit_export_task: "+err2.Error())
+ oprot.WriteMessageBegin("submit_export_task", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("submit_export_task", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorGetExportStatus struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorGetExportStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceGetExportStatusArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("get_export_status", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceGetExportStatusResult{}
+ var retval *palointernalservice.TExportStatusResult_
+ if retval, err2 = p.handler.GetExportStatus(ctx, args.TaskId); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_export_status: "+err2.Error())
+ oprot.WriteMessageBegin("get_export_status", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("get_export_status", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorEraseExportTask struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorEraseExportTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceEraseExportTaskArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("erase_export_task", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceEraseExportTaskResult{}
+ var retval *status.TStatus
+ if retval, err2 = p.handler.EraseExportTask(ctx, args.TaskId); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing erase_export_task: "+err2.Error())
+ oprot.WriteMessageBegin("erase_export_task", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("erase_export_task", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorGetTabletStat struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorGetTabletStat) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceGetTabletStatArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("get_tablet_stat", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceGetTabletStatResult{}
+ var retval *TTabletStatResult_
+ if retval, err2 = p.handler.GetTabletStat(ctx); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_tablet_stat: "+err2.Error())
+ oprot.WriteMessageBegin("get_tablet_stat", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("get_tablet_stat", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorGetTrashUsedCapacity struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorGetTrashUsedCapacity) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceGetTrashUsedCapacityArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("get_trash_used_capacity", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceGetTrashUsedCapacityResult{}
+ var retval int64
+ if retval, err2 = p.handler.GetTrashUsedCapacity(ctx); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_trash_used_capacity: "+err2.Error())
+ oprot.WriteMessageBegin("get_trash_used_capacity", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = &retval
+ }
+ if err2 = oprot.WriteMessageBegin("get_trash_used_capacity", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorGetDiskTrashUsedCapacity struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorGetDiskTrashUsedCapacity) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceGetDiskTrashUsedCapacityArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("get_disk_trash_used_capacity", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceGetDiskTrashUsedCapacityResult{}
+ var retval []*TDiskTrashInfo
+ if retval, err2 = p.handler.GetDiskTrashUsedCapacity(ctx); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_disk_trash_used_capacity: "+err2.Error())
+ oprot.WriteMessageBegin("get_disk_trash_used_capacity", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("get_disk_trash_used_capacity", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorSubmitRoutineLoadTask struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorSubmitRoutineLoadTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceSubmitRoutineLoadTaskArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("submit_routine_load_task", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceSubmitRoutineLoadTaskResult{}
+ var retval *status.TStatus
+ if retval, err2 = p.handler.SubmitRoutineLoadTask(ctx, args.Tasks); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submit_routine_load_task: "+err2.Error())
+ oprot.WriteMessageBegin("submit_routine_load_task", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("submit_routine_load_task", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorOpenScanner struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorOpenScanner) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceOpenScannerArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("open_scanner", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceOpenScannerResult{}
+ var retval *dorisexternalservice.TScanOpenResult_
+ if retval, err2 = p.handler.OpenScanner(ctx, args.Params); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing open_scanner: "+err2.Error())
+ oprot.WriteMessageBegin("open_scanner", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("open_scanner", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorGetNext struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorGetNext) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceGetNextArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("get_next", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceGetNextResult{}
+ var retval *dorisexternalservice.TScanBatchResult_
+ if retval, err2 = p.handler.GetNext(ctx, args.Params); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_next: "+err2.Error())
+ oprot.WriteMessageBegin("get_next", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("get_next", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorCloseScanner struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorCloseScanner) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceCloseScannerArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("close_scanner", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceCloseScannerResult{}
+ var retval *dorisexternalservice.TScanCloseResult_
+ if retval, err2 = p.handler.CloseScanner(ctx, args.Params); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing close_scanner: "+err2.Error())
+ oprot.WriteMessageBegin("close_scanner", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("close_scanner", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorGetStreamLoadRecord struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorGetStreamLoadRecord) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceGetStreamLoadRecordArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("get_stream_load_record", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceGetStreamLoadRecordResult{}
+ var retval *TStreamLoadRecordResult_
+ if retval, err2 = p.handler.GetStreamLoadRecord(ctx, args.LastStreamRecordTime); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_stream_load_record: "+err2.Error())
+ oprot.WriteMessageBegin("get_stream_load_record", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("get_stream_load_record", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorCheckStorageFormat struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorCheckStorageFormat) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceCheckStorageFormatArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("check_storage_format", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceCheckStorageFormatResult{}
+ var retval *TCheckStorageFormatResult_
+ if retval, err2 = p.handler.CheckStorageFormat(ctx); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing check_storage_format: "+err2.Error())
+ oprot.WriteMessageBegin("check_storage_format", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("check_storage_format", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorWarmUpCacheAsync struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorWarmUpCacheAsync) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceWarmUpCacheAsyncArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("warm_up_cache_async", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceWarmUpCacheAsyncResult{}
+ var retval *TWarmUpCacheAsyncResponse
+ if retval, err2 = p.handler.WarmUpCacheAsync(ctx, args.Request); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing warm_up_cache_async: "+err2.Error())
+ oprot.WriteMessageBegin("warm_up_cache_async", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("warm_up_cache_async", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorCheckWarmUpCacheAsync struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorCheckWarmUpCacheAsync) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceCheckWarmUpCacheAsyncArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("check_warm_up_cache_async", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceCheckWarmUpCacheAsyncResult{}
+ var retval *TCheckWarmUpCacheAsyncResponse
+ if retval, err2 = p.handler.CheckWarmUpCacheAsync(ctx, args.Request); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing check_warm_up_cache_async: "+err2.Error())
+ oprot.WriteMessageBegin("check_warm_up_cache_async", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("check_warm_up_cache_async", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorSyncLoadForTablets struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorSyncLoadForTablets) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceSyncLoadForTabletsArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("sync_load_for_tablets", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceSyncLoadForTabletsResult{}
+ var retval *TSyncLoadForTabletsResponse
+ if retval, err2 = p.handler.SyncLoadForTablets(ctx, args.Request); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing sync_load_for_tablets: "+err2.Error())
+ oprot.WriteMessageBegin("sync_load_for_tablets", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("sync_load_for_tablets", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorGetTopNHotPartitions struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorGetTopNHotPartitions) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceGetTopNHotPartitionsArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("get_top_n_hot_partitions", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceGetTopNHotPartitionsResult{}
+ var retval *TGetTopNHotPartitionsResponse
+ if retval, err2 = p.handler.GetTopNHotPartitions(ctx, args.Request); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_top_n_hot_partitions: "+err2.Error())
+ oprot.WriteMessageBegin("get_top_n_hot_partitions", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("get_top_n_hot_partitions", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorWarmUpTablets struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorWarmUpTablets) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceWarmUpTabletsArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("warm_up_tablets", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceWarmUpTabletsResult{}
+ var retval *TWarmUpTabletsResponse
+ if retval, err2 = p.handler.WarmUpTablets(ctx, args.Request); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing warm_up_tablets: "+err2.Error())
+ oprot.WriteMessageBegin("warm_up_tablets", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("warm_up_tablets", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorIngestBinlog struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorIngestBinlog) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceIngestBinlogArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("ingest_binlog", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceIngestBinlogResult{}
+ var retval *TIngestBinlogResult_
+ if retval, err2 = p.handler.IngestBinlog(ctx, args.IngestBinlogRequest); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ingest_binlog: "+err2.Error())
+ oprot.WriteMessageBegin("ingest_binlog", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("ingest_binlog", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorQueryIngestBinlog struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorQueryIngestBinlog) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceQueryIngestBinlogArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("query_ingest_binlog", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceQueryIngestBinlogResult{}
+ var retval *TQueryIngestBinlogResult_
+ if retval, err2 = p.handler.QueryIngestBinlog(ctx, args.QueryIngestBinlogRequest); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing query_ingest_binlog: "+err2.Error())
+ oprot.WriteMessageBegin("query_ingest_binlog", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("query_ingest_binlog", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorPublishTopicInfo struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorPublishTopicInfo) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServicePublishTopicInfoArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("publish_topic_info", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServicePublishTopicInfoResult{}
+ var retval *TPublishTopicResult_
+ if retval, err2 = p.handler.PublishTopicInfo(ctx, args.TopicRequest); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing publish_topic_info: "+err2.Error())
+ oprot.WriteMessageBegin("publish_topic_info", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("publish_topic_info", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type backendServiceProcessorGetRealtimeExecStatus struct {
+ handler BackendService
+}
+
+func (p *backendServiceProcessorGetRealtimeExecStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BackendServiceGetRealtimeExecStatusArgs{}
+ if err = args.Read(iprot); err != nil {
+ iprot.ReadMessageEnd()
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
+ oprot.WriteMessageBegin("get_realtime_exec_status", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return false, err
+ }
+
+ iprot.ReadMessageEnd()
+ var err2 error
+ result := BackendServiceGetRealtimeExecStatusResult{}
+ var retval *TGetRealtimeExecStatusResponse
+ if retval, err2 = p.handler.GetRealtimeExecStatus(ctx, args.Request); err2 != nil {
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_realtime_exec_status: "+err2.Error())
+ oprot.WriteMessageBegin("get_realtime_exec_status", thrift.EXCEPTION, seqId)
+ x.Write(oprot)
+ oprot.WriteMessageEnd()
+ oprot.Flush(ctx)
+ return true, err2
+ } else {
+ result.Success = retval
+ }
+ if err2 = oprot.WriteMessageBegin("get_realtime_exec_status", thrift.REPLY, seqId); err2 != nil {
+ err = err2
+ }
+ if err2 = result.Write(oprot); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
+ err = err2
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = err2
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+type BackendServiceExecPlanFragmentArgs struct {
+ Params *palointernalservice.TExecPlanFragmentParams `thrift:"params,1" frugal:"1,default,palointernalservice.TExecPlanFragmentParams" json:"params"`
+}
+
+func NewBackendServiceExecPlanFragmentArgs() *BackendServiceExecPlanFragmentArgs {
+ return &BackendServiceExecPlanFragmentArgs{}
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) InitDefault() {
+}
+
+var BackendServiceExecPlanFragmentArgs_Params_DEFAULT *palointernalservice.TExecPlanFragmentParams
+
+func (p *BackendServiceExecPlanFragmentArgs) GetParams() (v *palointernalservice.TExecPlanFragmentParams) {
+ if !p.IsSetParams() {
+ return BackendServiceExecPlanFragmentArgs_Params_DEFAULT
+ }
+ return p.Params
+}
+func (p *BackendServiceExecPlanFragmentArgs) SetParams(val *palointernalservice.TExecPlanFragmentParams) {
+ p.Params = val
+}
+
+var fieldIDToName_BackendServiceExecPlanFragmentArgs = map[int16]string{
+ 1: "params",
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) IsSetParams() bool {
+ return p.Params != nil
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentArgs[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := palointernalservice.NewTExecPlanFragmentParams()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Params = _field
+ return nil
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("exec_plan_fragment_args"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Params.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("BackendServiceExecPlanFragmentArgs(%+v)", *p)
+
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) DeepEqual(ano *BackendServiceExecPlanFragmentArgs) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Params) {
+ return false
+ }
+ return true
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) Field1DeepEqual(src *palointernalservice.TExecPlanFragmentParams) bool {
+
+ if !p.Params.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+
+type BackendServiceExecPlanFragmentResult struct {
+ Success *palointernalservice.TExecPlanFragmentResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TExecPlanFragmentResult_" json:"success,omitempty"`
+}
+
+func NewBackendServiceExecPlanFragmentResult() *BackendServiceExecPlanFragmentResult {
+ return &BackendServiceExecPlanFragmentResult{}
+}
+
+func (p *BackendServiceExecPlanFragmentResult) InitDefault() {
+}
+
+var BackendServiceExecPlanFragmentResult_Success_DEFAULT *palointernalservice.TExecPlanFragmentResult_
+
+func (p *BackendServiceExecPlanFragmentResult) GetSuccess() (v *palointernalservice.TExecPlanFragmentResult_) {
+ if !p.IsSetSuccess() {
+ return BackendServiceExecPlanFragmentResult_Success_DEFAULT
+ }
+ return p.Success
+}
+func (p *BackendServiceExecPlanFragmentResult) SetSuccess(x interface{}) {
+ p.Success = x.(*palointernalservice.TExecPlanFragmentResult_)
+}
+
+var fieldIDToName_BackendServiceExecPlanFragmentResult = map[int16]string{
+ 0: "success",
+}
+
+func (p *BackendServiceExecPlanFragmentResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *BackendServiceExecPlanFragmentResult) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField0(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentResult[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceExecPlanFragmentResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := palointernalservice.NewTExecPlanFragmentResult_()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Success = _field
+ return nil
+}
+
+func (p *BackendServiceExecPlanFragmentResult) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("exec_plan_fragment_result"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField0(oprot); err != nil {
+ fieldId = 0
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *BackendServiceExecPlanFragmentResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
+}
+
+func (p *BackendServiceExecPlanFragmentResult) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("BackendServiceExecPlanFragmentResult(%+v)", *p)
+
+}
+
+func (p *BackendServiceExecPlanFragmentResult) DeepEqual(ano *BackendServiceExecPlanFragmentResult) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field0DeepEqual(ano.Success) {
+ return false
+ }
+ return true
+}
+
+func (p *BackendServiceExecPlanFragmentResult) Field0DeepEqual(src *palointernalservice.TExecPlanFragmentResult_) bool {
+
+ if !p.Success.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+
+type BackendServiceCancelPlanFragmentArgs struct {
+ Params *palointernalservice.TCancelPlanFragmentParams `thrift:"params,1" frugal:"1,default,palointernalservice.TCancelPlanFragmentParams" json:"params"`
+}
+
+func NewBackendServiceCancelPlanFragmentArgs() *BackendServiceCancelPlanFragmentArgs {
+ return &BackendServiceCancelPlanFragmentArgs{}
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) InitDefault() {
+}
+
+var BackendServiceCancelPlanFragmentArgs_Params_DEFAULT *palointernalservice.TCancelPlanFragmentParams
+
+func (p *BackendServiceCancelPlanFragmentArgs) GetParams() (v *palointernalservice.TCancelPlanFragmentParams) {
+ if !p.IsSetParams() {
+ return BackendServiceCancelPlanFragmentArgs_Params_DEFAULT
+ }
+ return p.Params
+}
+func (p *BackendServiceCancelPlanFragmentArgs) SetParams(val *palointernalservice.TCancelPlanFragmentParams) {
+ p.Params = val
+}
+
+var fieldIDToName_BackendServiceCancelPlanFragmentArgs = map[int16]string{
+ 1: "params",
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) IsSetParams() bool {
+ return p.Params != nil
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentArgs[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := palointernalservice.NewTCancelPlanFragmentParams()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Params = _field
+ return nil
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("cancel_plan_fragment_args"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Params.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("BackendServiceCancelPlanFragmentArgs(%+v)", *p)
+
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) DeepEqual(ano *BackendServiceCancelPlanFragmentArgs) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Params) {
+ return false
+ }
+ return true
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) Field1DeepEqual(src *palointernalservice.TCancelPlanFragmentParams) bool {
+
+ if !p.Params.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+
+type BackendServiceCancelPlanFragmentResult struct {
+ Success *palointernalservice.TCancelPlanFragmentResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TCancelPlanFragmentResult_" json:"success,omitempty"`
+}
+
+func NewBackendServiceCancelPlanFragmentResult() *BackendServiceCancelPlanFragmentResult {
+ return &BackendServiceCancelPlanFragmentResult{}
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) InitDefault() {
+}
+
+var BackendServiceCancelPlanFragmentResult_Success_DEFAULT *palointernalservice.TCancelPlanFragmentResult_
+
+func (p *BackendServiceCancelPlanFragmentResult) GetSuccess() (v *palointernalservice.TCancelPlanFragmentResult_) {
+ if !p.IsSetSuccess() {
+ return BackendServiceCancelPlanFragmentResult_Success_DEFAULT
+ }
+ return p.Success
+}
+func (p *BackendServiceCancelPlanFragmentResult) SetSuccess(x interface{}) {
+ p.Success = x.(*palointernalservice.TCancelPlanFragmentResult_)
+}
+
+var fieldIDToName_BackendServiceCancelPlanFragmentResult = map[int16]string{
+ 0: "success",
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField0(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentResult[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := palointernalservice.NewTCancelPlanFragmentResult_()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Success = _field
+ return nil
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("cancel_plan_fragment_result"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField0(oprot); err != nil {
+ fieldId = 0
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("BackendServiceCancelPlanFragmentResult(%+v)", *p)
+
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) DeepEqual(ano *BackendServiceCancelPlanFragmentResult) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field0DeepEqual(ano.Success) {
+ return false
+ }
+ return true
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) Field0DeepEqual(src *palointernalservice.TCancelPlanFragmentResult_) bool {
+
+ if !p.Success.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+
+type BackendServiceTransmitDataArgs struct {
+ Params *palointernalservice.TTransmitDataParams `thrift:"params,1" frugal:"1,default,palointernalservice.TTransmitDataParams" json:"params"`
+}
+
+func NewBackendServiceTransmitDataArgs() *BackendServiceTransmitDataArgs {
+ return &BackendServiceTransmitDataArgs{}
+}
+
+func (p *BackendServiceTransmitDataArgs) InitDefault() {
+}
+
+var BackendServiceTransmitDataArgs_Params_DEFAULT *palointernalservice.TTransmitDataParams
+
+func (p *BackendServiceTransmitDataArgs) GetParams() (v *palointernalservice.TTransmitDataParams) {
+ if !p.IsSetParams() {
+ return BackendServiceTransmitDataArgs_Params_DEFAULT
+ }
+ return p.Params
+}
+func (p *BackendServiceTransmitDataArgs) SetParams(val *palointernalservice.TTransmitDataParams) {
+ p.Params = val
+}
+
+var fieldIDToName_BackendServiceTransmitDataArgs = map[int16]string{
+ 1: "params",
+}
+
+func (p *BackendServiceTransmitDataArgs) IsSetParams() bool {
+ return p.Params != nil
+}
+
+func (p *BackendServiceTransmitDataArgs) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataArgs[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceTransmitDataArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := palointernalservice.NewTTransmitDataParams()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Params = _field
+ return nil
+}
+
+func (p *BackendServiceTransmitDataArgs) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("transmit_data_args"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *BackendServiceTransmitDataArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Params.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *BackendServiceTransmitDataArgs) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("BackendServiceTransmitDataArgs(%+v)", *p)
+
+}
+
+func (p *BackendServiceTransmitDataArgs) DeepEqual(ano *BackendServiceTransmitDataArgs) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Params) {
+ return false
+ }
+ return true
+}
+
+func (p *BackendServiceTransmitDataArgs) Field1DeepEqual(src *palointernalservice.TTransmitDataParams) bool {
+
+ if !p.Params.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+
+type BackendServiceTransmitDataResult struct {
+ Success *palointernalservice.TTransmitDataResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TTransmitDataResult_" json:"success,omitempty"`
+}
+
+func NewBackendServiceTransmitDataResult() *BackendServiceTransmitDataResult {
+ return &BackendServiceTransmitDataResult{}
+}
+
+func (p *BackendServiceTransmitDataResult) InitDefault() {
+}
+
+var BackendServiceTransmitDataResult_Success_DEFAULT *palointernalservice.TTransmitDataResult_
+
+func (p *BackendServiceTransmitDataResult) GetSuccess() (v *palointernalservice.TTransmitDataResult_) {
+ if !p.IsSetSuccess() {
+ return BackendServiceTransmitDataResult_Success_DEFAULT
+ }
+ return p.Success
+}
+func (p *BackendServiceTransmitDataResult) SetSuccess(x interface{}) {
+ p.Success = x.(*palointernalservice.TTransmitDataResult_)
+}
+
+var fieldIDToName_BackendServiceTransmitDataResult = map[int16]string{
+ 0: "success",
+}
+
+func (p *BackendServiceTransmitDataResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *BackendServiceTransmitDataResult) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField0(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataResult[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceTransmitDataResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := palointernalservice.NewTTransmitDataResult_()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Success = _field
+ return nil
+}
+
+func (p *BackendServiceTransmitDataResult) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("transmit_data_result"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField0(oprot); err != nil {
+ fieldId = 0
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *BackendServiceTransmitDataResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
+}
+
+func (p *BackendServiceTransmitDataResult) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("BackendServiceTransmitDataResult(%+v)", *p)
+
+}
+
+func (p *BackendServiceTransmitDataResult) DeepEqual(ano *BackendServiceTransmitDataResult) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field0DeepEqual(ano.Success) {
+ return false
+ }
+ return true
+}
+
+func (p *BackendServiceTransmitDataResult) Field0DeepEqual(src *palointernalservice.TTransmitDataResult_) bool {
+
+ if !p.Success.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+
+type BackendServiceSubmitTasksArgs struct {
+ Tasks []*agentservice.TAgentTaskRequest `thrift:"tasks,1" frugal:"1,default,list" json:"tasks"`
+}
+
+func NewBackendServiceSubmitTasksArgs() *BackendServiceSubmitTasksArgs {
+ return &BackendServiceSubmitTasksArgs{}
+}
+
+func (p *BackendServiceSubmitTasksArgs) InitDefault() {
+}
+
+func (p *BackendServiceSubmitTasksArgs) GetTasks() (v []*agentservice.TAgentTaskRequest) {
+ return p.Tasks
+}
+func (p *BackendServiceSubmitTasksArgs) SetTasks(val []*agentservice.TAgentTaskRequest) {
+ p.Tasks = val
+}
+
+var fieldIDToName_BackendServiceSubmitTasksArgs = map[int16]string{
+ 1: "tasks",
+}
+
+func (p *BackendServiceSubmitTasksArgs) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksArgs[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceSubmitTasksArgs) ReadField1(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]*agentservice.TAgentTaskRequest, 0, size)
+ values := make([]agentservice.TAgentTaskRequest, size)
+ for i := 0; i < size; i++ {
+ _elem := &values[i]
+ _elem.InitDefault()
+
+ if err := _elem.Read(iprot); err != nil {
+ return err
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.Tasks = _field
+ return nil
+}
+
+func (p *BackendServiceSubmitTasksArgs) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("submit_tasks_args"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *BackendServiceSubmitTasksArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("tasks", thrift.LIST, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tasks)); err != nil {
+ return err
+ }
+ for _, v := range p.Tasks {
+ if err := v.Write(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *BackendServiceSubmitTasksArgs) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("BackendServiceSubmitTasksArgs(%+v)", *p)
+
+}
+
+func (p *BackendServiceSubmitTasksArgs) DeepEqual(ano *BackendServiceSubmitTasksArgs) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Tasks) {
+ return false
+ }
+ return true
+}
+
+func (p *BackendServiceSubmitTasksArgs) Field1DeepEqual(src []*agentservice.TAgentTaskRequest) bool {
+
+ if len(p.Tasks) != len(src) {
+ return false
+ }
+ for i, v := range p.Tasks {
+ _src := src[i]
+ if !v.DeepEqual(_src) {
+ return false
+ }
+ }
+ return true
+}
+
+type BackendServiceSubmitTasksResult struct {
+ Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"`
+}
+
+func NewBackendServiceSubmitTasksResult() *BackendServiceSubmitTasksResult {
+ return &BackendServiceSubmitTasksResult{}
+}
+
+func (p *BackendServiceSubmitTasksResult) InitDefault() {
+}
+
+var BackendServiceSubmitTasksResult_Success_DEFAULT *agentservice.TAgentResult_
+
+func (p *BackendServiceSubmitTasksResult) GetSuccess() (v *agentservice.TAgentResult_) {
+ if !p.IsSetSuccess() {
+ return BackendServiceSubmitTasksResult_Success_DEFAULT
+ }
+ return p.Success
+}
+func (p *BackendServiceSubmitTasksResult) SetSuccess(x interface{}) {
+ p.Success = x.(*agentservice.TAgentResult_)
+}
+
+var fieldIDToName_BackendServiceSubmitTasksResult = map[int16]string{
+ 0: "success",
+}
+
+func (p *BackendServiceSubmitTasksResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *BackendServiceSubmitTasksResult) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField0(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksResult[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceSubmitTasksResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := agentservice.NewTAgentResult_()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Success = _field
+ return nil
+}
+
+func (p *BackendServiceSubmitTasksResult) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("submit_tasks_result"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField0(oprot); err != nil {
+ fieldId = 0
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *BackendServiceSubmitTasksResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
+}
+
+func (p *BackendServiceSubmitTasksResult) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("BackendServiceSubmitTasksResult(%+v)", *p)
+
+}
+
+func (p *BackendServiceSubmitTasksResult) DeepEqual(ano *BackendServiceSubmitTasksResult) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field0DeepEqual(ano.Success) {
+ return false
+ }
+ return true
+}
+
+func (p *BackendServiceSubmitTasksResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool {
+
+ if !p.Success.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+
+type BackendServiceMakeSnapshotArgs struct {
+ SnapshotRequest *agentservice.TSnapshotRequest `thrift:"snapshot_request,1" frugal:"1,default,agentservice.TSnapshotRequest" json:"snapshot_request"`
+}
+
+func NewBackendServiceMakeSnapshotArgs() *BackendServiceMakeSnapshotArgs {
+ return &BackendServiceMakeSnapshotArgs{}
+}
+
+func (p *BackendServiceMakeSnapshotArgs) InitDefault() {
+}
+
+var BackendServiceMakeSnapshotArgs_SnapshotRequest_DEFAULT *agentservice.TSnapshotRequest
+
+func (p *BackendServiceMakeSnapshotArgs) GetSnapshotRequest() (v *agentservice.TSnapshotRequest) {
+ if !p.IsSetSnapshotRequest() {
+ return BackendServiceMakeSnapshotArgs_SnapshotRequest_DEFAULT
+ }
+ return p.SnapshotRequest
+}
+func (p *BackendServiceMakeSnapshotArgs) SetSnapshotRequest(val *agentservice.TSnapshotRequest) {
+ p.SnapshotRequest = val
+}
+
+var fieldIDToName_BackendServiceMakeSnapshotArgs = map[int16]string{
+ 1: "snapshot_request",
+}
+
+func (p *BackendServiceMakeSnapshotArgs) IsSetSnapshotRequest() bool {
+ return p.SnapshotRequest != nil
+}
+
+func (p *BackendServiceMakeSnapshotArgs) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotArgs[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceMakeSnapshotArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := agentservice.NewTSnapshotRequest()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.SnapshotRequest = _field
+ return nil
+}
+
+func (p *BackendServiceMakeSnapshotArgs) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("make_snapshot_args"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *BackendServiceMakeSnapshotArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("snapshot_request", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.SnapshotRequest.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *BackendServiceMakeSnapshotArgs) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("BackendServiceMakeSnapshotArgs(%+v)", *p)
+
+}
+
+func (p *BackendServiceMakeSnapshotArgs) DeepEqual(ano *BackendServiceMakeSnapshotArgs) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.SnapshotRequest) {
+ return false
+ }
+ return true
+}
+
+func (p *BackendServiceMakeSnapshotArgs) Field1DeepEqual(src *agentservice.TSnapshotRequest) bool {
+
+ if !p.SnapshotRequest.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+
+type BackendServiceMakeSnapshotResult struct {
+ Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"`
+}
+
+func NewBackendServiceMakeSnapshotResult() *BackendServiceMakeSnapshotResult {
+ return &BackendServiceMakeSnapshotResult{}
+}
+
+func (p *BackendServiceMakeSnapshotResult) InitDefault() {
+}
+
+var BackendServiceMakeSnapshotResult_Success_DEFAULT *agentservice.TAgentResult_
+
+func (p *BackendServiceMakeSnapshotResult) GetSuccess() (v *agentservice.TAgentResult_) {
+ if !p.IsSetSuccess() {
+ return BackendServiceMakeSnapshotResult_Success_DEFAULT
+ }
+ return p.Success
+}
+func (p *BackendServiceMakeSnapshotResult) SetSuccess(x interface{}) {
+ p.Success = x.(*agentservice.TAgentResult_)
+}
+
+var fieldIDToName_BackendServiceMakeSnapshotResult = map[int16]string{
+ 0: "success",
+}
+
+func (p *BackendServiceMakeSnapshotResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *BackendServiceMakeSnapshotResult) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField0(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotResult[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceMakeSnapshotResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := agentservice.NewTAgentResult_()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Success = _field
+ return nil
+}
+
+func (p *BackendServiceMakeSnapshotResult) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("make_snapshot_result"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField0(oprot); err != nil {
+ fieldId = 0
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceClient) CleanTrash(ctx context.Context) (err error) {
- var _args BackendServiceCleanTrashArgs
- if err = p.Client_().Call(ctx, "clean_trash", &_args, nil); err != nil {
- return
+
+func (p *BackendServiceMakeSnapshotResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
}
return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceClient) CheckStorageFormat(ctx context.Context) (r *TCheckStorageFormatResult_, err error) {
- var _args BackendServiceCheckStorageFormatArgs
- var _result BackendServiceCheckStorageFormatResult
- if err = p.Client_().Call(ctx, "check_storage_format", &_args, &_result); err != nil {
- return
+
+func (p *BackendServiceMakeSnapshotResult) String() string {
+ if p == nil {
+ return ""
}
- return _result.GetSuccess(), nil
+ return fmt.Sprintf("BackendServiceMakeSnapshotResult(%+v)", *p)
+
}
-func (p *BackendServiceClient) IngestBinlog(ctx context.Context, ingestBinlogRequest *TIngestBinlogRequest) (r *TIngestBinlogResult_, err error) {
- var _args BackendServiceIngestBinlogArgs
- _args.IngestBinlogRequest = ingestBinlogRequest
- var _result BackendServiceIngestBinlogResult
- if err = p.Client_().Call(ctx, "ingest_binlog", &_args, &_result); err != nil {
- return
+
+func (p *BackendServiceMakeSnapshotResult) DeepEqual(ano *BackendServiceMakeSnapshotResult) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
}
- return _result.GetSuccess(), nil
+ if !p.Field0DeepEqual(ano.Success) {
+ return false
+ }
+ return true
}
-type BackendServiceProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler BackendService
+func (p *BackendServiceMakeSnapshotResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool {
+
+ if !p.Success.DeepEqual(src) {
+ return false
+ }
+ return true
}
-func (p *BackendServiceProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
+type BackendServiceReleaseSnapshotArgs struct {
+ SnapshotPath string `thrift:"snapshot_path,1" frugal:"1,default,string" json:"snapshot_path"`
}
-func (p *BackendServiceProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
+func NewBackendServiceReleaseSnapshotArgs() *BackendServiceReleaseSnapshotArgs {
+ return &BackendServiceReleaseSnapshotArgs{}
}
-func (p *BackendServiceProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
+func (p *BackendServiceReleaseSnapshotArgs) InitDefault() {
}
-func NewBackendServiceProcessor(handler BackendService) *BackendServiceProcessor {
- self := &BackendServiceProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
- self.AddToProcessorMap("exec_plan_fragment", &backendServiceProcessorExecPlanFragment{handler: handler})
- self.AddToProcessorMap("cancel_plan_fragment", &backendServiceProcessorCancelPlanFragment{handler: handler})
- self.AddToProcessorMap("transmit_data", &backendServiceProcessorTransmitData{handler: handler})
- self.AddToProcessorMap("submit_tasks", &backendServiceProcessorSubmitTasks{handler: handler})
- self.AddToProcessorMap("make_snapshot", &backendServiceProcessorMakeSnapshot{handler: handler})
- self.AddToProcessorMap("release_snapshot", &backendServiceProcessorReleaseSnapshot{handler: handler})
- self.AddToProcessorMap("publish_cluster_state", &backendServiceProcessorPublishClusterState{handler: handler})
- self.AddToProcessorMap("submit_export_task", &backendServiceProcessorSubmitExportTask{handler: handler})
- self.AddToProcessorMap("get_export_status", &backendServiceProcessorGetExportStatus{handler: handler})
- self.AddToProcessorMap("erase_export_task", &backendServiceProcessorEraseExportTask{handler: handler})
- self.AddToProcessorMap("get_tablet_stat", &backendServiceProcessorGetTabletStat{handler: handler})
- self.AddToProcessorMap("get_trash_used_capacity", &backendServiceProcessorGetTrashUsedCapacity{handler: handler})
- self.AddToProcessorMap("get_disk_trash_used_capacity", &backendServiceProcessorGetDiskTrashUsedCapacity{handler: handler})
- self.AddToProcessorMap("submit_routine_load_task", &backendServiceProcessorSubmitRoutineLoadTask{handler: handler})
- self.AddToProcessorMap("open_scanner", &backendServiceProcessorOpenScanner{handler: handler})
- self.AddToProcessorMap("get_next", &backendServiceProcessorGetNext{handler: handler})
- self.AddToProcessorMap("close_scanner", &backendServiceProcessorCloseScanner{handler: handler})
- self.AddToProcessorMap("get_stream_load_record", &backendServiceProcessorGetStreamLoadRecord{handler: handler})
- self.AddToProcessorMap("clean_trash", &backendServiceProcessorCleanTrash{handler: handler})
- self.AddToProcessorMap("check_storage_format", &backendServiceProcessorCheckStorageFormat{handler: handler})
- self.AddToProcessorMap("ingest_binlog", &backendServiceProcessorIngestBinlog{handler: handler})
- return self
+func (p *BackendServiceReleaseSnapshotArgs) GetSnapshotPath() (v string) {
+ return p.SnapshotPath
}
-func (p *BackendServiceProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return false, err
- }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(ctx, seqId, iprot, oprot)
- }
- iprot.Skip(thrift.STRUCT)
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
- oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, x
+func (p *BackendServiceReleaseSnapshotArgs) SetSnapshotPath(val string) {
+ p.SnapshotPath = val
}
-type backendServiceProcessorExecPlanFragment struct {
- handler BackendService
+var fieldIDToName_BackendServiceReleaseSnapshotArgs = map[int16]string{
+ 1: "snapshot_path",
}
-func (p *backendServiceProcessorExecPlanFragment) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceExecPlanFragmentArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("exec_plan_fragment", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
- }
-
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceExecPlanFragmentResult{}
- var retval *palointernalservice.TExecPlanFragmentResult_
- if retval, err2 = p.handler.ExecPlanFragment(ctx, args.Params); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing exec_plan_fragment: "+err2.Error())
- oprot.WriteMessageBegin("exec_plan_fragment", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("exec_plan_fragment", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
- }
- if err != nil {
- return
- }
- return true, err
-}
+func (p *BackendServiceReleaseSnapshotArgs) Read(iprot thrift.TProtocol) (err error) {
-type backendServiceProcessorCancelPlanFragment struct {
- handler BackendService
-}
+ var fieldTypeId thrift.TType
+ var fieldId int16
-func (p *backendServiceProcessorCancelPlanFragment) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceCancelPlanFragmentArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("cancel_plan_fragment", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
}
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceCancelPlanFragmentResult{}
- var retval *palointernalservice.TCancelPlanFragmentResult_
- if retval, err2 = p.handler.CancelPlanFragment(ctx, args.Params); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing cancel_plan_fragment: "+err2.Error())
- oprot.WriteMessageBegin("cancel_plan_fragment", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("cancel_plan_fragment", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
}
- if err != nil {
- return
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
}
- return true, err
-}
-type backendServiceProcessorTransmitData struct {
- handler BackendService
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotArgs[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *backendServiceProcessorTransmitData) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceTransmitDataArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("transmit_data", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
- }
+func (p *BackendServiceReleaseSnapshotArgs) ReadField1(iprot thrift.TProtocol) error {
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceTransmitDataResult{}
- var retval *palointernalservice.TTransmitDataResult_
- if retval, err2 = p.handler.TransmitData(ctx, args.Params); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing transmit_data: "+err2.Error())
- oprot.WriteMessageBegin("transmit_data", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
+ var _field string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
} else {
- result.Success = retval
+ _field = v
}
- if err2 = oprot.WriteMessageBegin("transmit_data", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ p.SnapshotPath = _field
+ return nil
+}
+
+func (p *BackendServiceReleaseSnapshotArgs) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("release_snapshot_args"); err != nil {
+ goto WriteStructBeginError
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
}
- if err != nil {
- return
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
}
- return true, err
-}
-
-type backendServiceProcessorSubmitTasks struct {
- handler BackendService
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *backendServiceProcessorSubmitTasks) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceSubmitTasksArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("submit_tasks", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
+func (p *BackendServiceReleaseSnapshotArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("snapshot_path", thrift.STRING, 1); err != nil {
+ goto WriteFieldBeginError
}
-
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceSubmitTasksResult{}
- var retval *agentservice.TAgentResult_
- if retval, err2 = p.handler.SubmitTasks(ctx, args.Tasks); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submit_tasks: "+err2.Error())
- oprot.WriteMessageBegin("submit_tasks", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
+ if err := oprot.WriteString(p.SnapshotPath); err != nil {
+ return err
}
- if err2 = oprot.WriteMessageBegin("submit_tasks", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *BackendServiceReleaseSnapshotArgs) String() string {
+ if p == nil {
+ return ""
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ return fmt.Sprintf("BackendServiceReleaseSnapshotArgs(%+v)", *p)
+
+}
+
+func (p *BackendServiceReleaseSnapshotArgs) DeepEqual(ano *BackendServiceReleaseSnapshotArgs) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+ if !p.Field1DeepEqual(ano.SnapshotPath) {
+ return false
}
- if err != nil {
- return
+ return true
+}
+
+func (p *BackendServiceReleaseSnapshotArgs) Field1DeepEqual(src string) bool {
+
+ if strings.Compare(p.SnapshotPath, src) != 0 {
+ return false
}
- return true, err
+ return true
}
-type backendServiceProcessorMakeSnapshot struct {
- handler BackendService
+type BackendServiceReleaseSnapshotResult struct {
+ Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"`
}
-func (p *backendServiceProcessorMakeSnapshot) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceMakeSnapshotArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("make_snapshot", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
- }
+func NewBackendServiceReleaseSnapshotResult() *BackendServiceReleaseSnapshotResult {
+ return &BackendServiceReleaseSnapshotResult{}
+}
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceMakeSnapshotResult{}
- var retval *agentservice.TAgentResult_
- if retval, err2 = p.handler.MakeSnapshot(ctx, args.SnapshotRequest); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing make_snapshot: "+err2.Error())
- oprot.WriteMessageBegin("make_snapshot", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("make_snapshot", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+func (p *BackendServiceReleaseSnapshotResult) InitDefault() {
+}
+
+var BackendServiceReleaseSnapshotResult_Success_DEFAULT *agentservice.TAgentResult_
+
+func (p *BackendServiceReleaseSnapshotResult) GetSuccess() (v *agentservice.TAgentResult_) {
+ if !p.IsSetSuccess() {
+ return BackendServiceReleaseSnapshotResult_Success_DEFAULT
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ return p.Success
+}
+func (p *BackendServiceReleaseSnapshotResult) SetSuccess(x interface{}) {
+ p.Success = x.(*agentservice.TAgentResult_)
+}
+
+var fieldIDToName_BackendServiceReleaseSnapshotResult = map[int16]string{
+ 0: "success",
+}
+
+func (p *BackendServiceReleaseSnapshotResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *BackendServiceReleaseSnapshotResult) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField0(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
}
- if err != nil {
- return
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
}
- return true, err
-}
-type backendServiceProcessorReleaseSnapshot struct {
- handler BackendService
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotResult[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *backendServiceProcessorReleaseSnapshot) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceReleaseSnapshotArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("release_snapshot", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
+func (p *BackendServiceReleaseSnapshotResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := agentservice.NewTAgentResult_()
+ if err := _field.Read(iprot); err != nil {
+ return err
}
+ p.Success = _field
+ return nil
+}
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceReleaseSnapshotResult{}
- var retval *agentservice.TAgentResult_
- if retval, err2 = p.handler.ReleaseSnapshot(ctx, args.SnapshotPath); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing release_snapshot: "+err2.Error())
- oprot.WriteMessageBegin("release_snapshot", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("release_snapshot", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+func (p *BackendServiceReleaseSnapshotResult) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("release_snapshot_result"); err != nil {
+ goto WriteStructBeginError
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if p != nil {
+ if err = p.writeField0(oprot); err != nil {
+ fieldId = 0
+ goto WriteFieldError
+ }
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
}
- if err != nil {
- return
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
}
- return true, err
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-type backendServiceProcessorPublishClusterState struct {
- handler BackendService
+func (p *BackendServiceReleaseSnapshotResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *backendServiceProcessorPublishClusterState) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServicePublishClusterStateArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("publish_cluster_state", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
+func (p *BackendServiceReleaseSnapshotResult) String() string {
+ if p == nil {
+ return ""
}
+ return fmt.Sprintf("BackendServiceReleaseSnapshotResult(%+v)", *p)
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServicePublishClusterStateResult{}
- var retval *agentservice.TAgentResult_
- if retval, err2 = p.handler.PublishClusterState(ctx, args.Request); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing publish_cluster_state: "+err2.Error())
- oprot.WriteMessageBegin("publish_cluster_state", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("publish_cluster_state", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+}
+
+func (p *BackendServiceReleaseSnapshotResult) DeepEqual(ano *BackendServiceReleaseSnapshotResult) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+ if !p.Field0DeepEqual(ano.Success) {
+ return false
}
- if err != nil {
- return
+ return true
+}
+
+func (p *BackendServiceReleaseSnapshotResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool {
+
+ if !p.Success.DeepEqual(src) {
+ return false
}
- return true, err
+ return true
}
-type backendServiceProcessorSubmitExportTask struct {
- handler BackendService
+type BackendServicePublishClusterStateArgs struct {
+ Request *agentservice.TAgentPublishRequest `thrift:"request,1" frugal:"1,default,agentservice.TAgentPublishRequest" json:"request"`
}
-func (p *backendServiceProcessorSubmitExportTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceSubmitExportTaskArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("submit_export_task", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
- }
+func NewBackendServicePublishClusterStateArgs() *BackendServicePublishClusterStateArgs {
+ return &BackendServicePublishClusterStateArgs{}
+}
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceSubmitExportTaskResult{}
- var retval *status.TStatus
- if retval, err2 = p.handler.SubmitExportTask(ctx, args.Request); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submit_export_task: "+err2.Error())
- oprot.WriteMessageBegin("submit_export_task", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("submit_export_task", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+func (p *BackendServicePublishClusterStateArgs) InitDefault() {
+}
+
+var BackendServicePublishClusterStateArgs_Request_DEFAULT *agentservice.TAgentPublishRequest
+
+func (p *BackendServicePublishClusterStateArgs) GetRequest() (v *agentservice.TAgentPublishRequest) {
+ if !p.IsSetRequest() {
+ return BackendServicePublishClusterStateArgs_Request_DEFAULT
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ return p.Request
+}
+func (p *BackendServicePublishClusterStateArgs) SetRequest(val *agentservice.TAgentPublishRequest) {
+ p.Request = val
+}
+
+var fieldIDToName_BackendServicePublishClusterStateArgs = map[int16]string{
+ 1: "request",
+}
+
+func (p *BackendServicePublishClusterStateArgs) IsSetRequest() bool {
+ return p.Request != nil
+}
+
+func (p *BackendServicePublishClusterStateArgs) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
}
- if err != nil {
- return
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
}
- return true, err
-}
-type backendServiceProcessorGetExportStatus struct {
- handler BackendService
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateArgs[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *backendServiceProcessorGetExportStatus) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceGetExportStatusArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("get_export_status", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
+func (p *BackendServicePublishClusterStateArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := agentservice.NewTAgentPublishRequest()
+ if err := _field.Read(iprot); err != nil {
+ return err
}
+ p.Request = _field
+ return nil
+}
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceGetExportStatusResult{}
- var retval *palointernalservice.TExportStatusResult_
- if retval, err2 = p.handler.GetExportStatus(ctx, args.TaskId); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_export_status: "+err2.Error())
- oprot.WriteMessageBegin("get_export_status", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("get_export_status", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+func (p *BackendServicePublishClusterStateArgs) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("publish_cluster_state_args"); err != nil {
+ goto WriteStructBeginError
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
}
- if err != nil {
- return
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
}
- return true, err
-}
-
-type backendServiceProcessorEraseExportTask struct {
- handler BackendService
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *backendServiceProcessorEraseExportTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceEraseExportTaskArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("erase_export_task", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
+func (p *BackendServicePublishClusterStateArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
}
-
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceEraseExportTaskResult{}
- var retval *status.TStatus
- if retval, err2 = p.handler.EraseExportTask(ctx, args.TaskId); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing erase_export_task: "+err2.Error())
- oprot.WriteMessageBegin("erase_export_task", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
+ if err := p.Request.Write(oprot); err != nil {
+ return err
}
- if err2 = oprot.WriteMessageBegin("erase_export_task", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *BackendServicePublishClusterStateArgs) String() string {
+ if p == nil {
+ return ""
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ return fmt.Sprintf("BackendServicePublishClusterStateArgs(%+v)", *p)
+
+}
+
+func (p *BackendServicePublishClusterStateArgs) DeepEqual(ano *BackendServicePublishClusterStateArgs) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+ if !p.Field1DeepEqual(ano.Request) {
+ return false
}
- if err != nil {
- return
+ return true
+}
+
+func (p *BackendServicePublishClusterStateArgs) Field1DeepEqual(src *agentservice.TAgentPublishRequest) bool {
+
+ if !p.Request.DeepEqual(src) {
+ return false
}
- return true, err
+ return true
}
-type backendServiceProcessorGetTabletStat struct {
- handler BackendService
+type BackendServicePublishClusterStateResult struct {
+ Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"`
}
-func (p *backendServiceProcessorGetTabletStat) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceGetTabletStatArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("get_tablet_stat", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
- }
+func NewBackendServicePublishClusterStateResult() *BackendServicePublishClusterStateResult {
+ return &BackendServicePublishClusterStateResult{}
+}
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceGetTabletStatResult{}
- var retval *TTabletStatResult_
- if retval, err2 = p.handler.GetTabletStat(ctx); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_tablet_stat: "+err2.Error())
- oprot.WriteMessageBegin("get_tablet_stat", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("get_tablet_stat", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+func (p *BackendServicePublishClusterStateResult) InitDefault() {
+}
+
+var BackendServicePublishClusterStateResult_Success_DEFAULT *agentservice.TAgentResult_
+
+func (p *BackendServicePublishClusterStateResult) GetSuccess() (v *agentservice.TAgentResult_) {
+ if !p.IsSetSuccess() {
+ return BackendServicePublishClusterStateResult_Success_DEFAULT
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ return p.Success
+}
+func (p *BackendServicePublishClusterStateResult) SetSuccess(x interface{}) {
+ p.Success = x.(*agentservice.TAgentResult_)
+}
+
+var fieldIDToName_BackendServicePublishClusterStateResult = map[int16]string{
+ 0: "success",
+}
+
+func (p *BackendServicePublishClusterStateResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *BackendServicePublishClusterStateResult) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField0(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
}
- if err != nil {
- return
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
}
- return true, err
-}
-type backendServiceProcessorGetTrashUsedCapacity struct {
- handler BackendService
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateResult[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *backendServiceProcessorGetTrashUsedCapacity) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceGetTrashUsedCapacityArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("get_trash_used_capacity", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
+func (p *BackendServicePublishClusterStateResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := agentservice.NewTAgentResult_()
+ if err := _field.Read(iprot); err != nil {
+ return err
}
+ p.Success = _field
+ return nil
+}
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceGetTrashUsedCapacityResult{}
- var retval int64
- if retval, err2 = p.handler.GetTrashUsedCapacity(ctx); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_trash_used_capacity: "+err2.Error())
- oprot.WriteMessageBegin("get_trash_used_capacity", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = &retval
- }
- if err2 = oprot.WriteMessageBegin("get_trash_used_capacity", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+func (p *BackendServicePublishClusterStateResult) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("publish_cluster_state_result"); err != nil {
+ goto WriteStructBeginError
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if p != nil {
+ if err = p.writeField0(oprot); err != nil {
+ fieldId = 0
+ goto WriteFieldError
+ }
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
}
- if err != nil {
- return
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
}
- return true, err
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-type backendServiceProcessorGetDiskTrashUsedCapacity struct {
- handler BackendService
+func (p *BackendServicePublishClusterStateResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *backendServiceProcessorGetDiskTrashUsedCapacity) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceGetDiskTrashUsedCapacityArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("get_disk_trash_used_capacity", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
+func (p *BackendServicePublishClusterStateResult) String() string {
+ if p == nil {
+ return ""
}
+ return fmt.Sprintf("BackendServicePublishClusterStateResult(%+v)", *p)
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceGetDiskTrashUsedCapacityResult{}
- var retval []*TDiskTrashInfo
- if retval, err2 = p.handler.GetDiskTrashUsedCapacity(ctx); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_disk_trash_used_capacity: "+err2.Error())
- oprot.WriteMessageBegin("get_disk_trash_used_capacity", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("get_disk_trash_used_capacity", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+}
+
+func (p *BackendServicePublishClusterStateResult) DeepEqual(ano *BackendServicePublishClusterStateResult) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+ if !p.Field0DeepEqual(ano.Success) {
+ return false
}
- if err != nil {
- return
+ return true
+}
+
+func (p *BackendServicePublishClusterStateResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool {
+
+ if !p.Success.DeepEqual(src) {
+ return false
}
- return true, err
+ return true
}
-type backendServiceProcessorSubmitRoutineLoadTask struct {
- handler BackendService
+type BackendServiceSubmitExportTaskArgs struct {
+ Request *TExportTaskRequest `thrift:"request,1" frugal:"1,default,TExportTaskRequest" json:"request"`
}
-func (p *backendServiceProcessorSubmitRoutineLoadTask) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceSubmitRoutineLoadTaskArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("submit_routine_load_task", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
- }
+func NewBackendServiceSubmitExportTaskArgs() *BackendServiceSubmitExportTaskArgs {
+ return &BackendServiceSubmitExportTaskArgs{}
+}
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceSubmitRoutineLoadTaskResult{}
- var retval *status.TStatus
- if retval, err2 = p.handler.SubmitRoutineLoadTask(ctx, args.Tasks); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submit_routine_load_task: "+err2.Error())
- oprot.WriteMessageBegin("submit_routine_load_task", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("submit_routine_load_task", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+func (p *BackendServiceSubmitExportTaskArgs) InitDefault() {
+}
+
+var BackendServiceSubmitExportTaskArgs_Request_DEFAULT *TExportTaskRequest
+
+func (p *BackendServiceSubmitExportTaskArgs) GetRequest() (v *TExportTaskRequest) {
+ if !p.IsSetRequest() {
+ return BackendServiceSubmitExportTaskArgs_Request_DEFAULT
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ return p.Request
+}
+func (p *BackendServiceSubmitExportTaskArgs) SetRequest(val *TExportTaskRequest) {
+ p.Request = val
+}
+
+var fieldIDToName_BackendServiceSubmitExportTaskArgs = map[int16]string{
+ 1: "request",
+}
+
+func (p *BackendServiceSubmitExportTaskArgs) IsSetRequest() bool {
+ return p.Request != nil
+}
+
+func (p *BackendServiceSubmitExportTaskArgs) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
}
- if err != nil {
- return
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
}
- return true, err
-}
-type backendServiceProcessorOpenScanner struct {
- handler BackendService
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskArgs[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *backendServiceProcessorOpenScanner) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceOpenScannerArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("open_scanner", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
+func (p *BackendServiceSubmitExportTaskArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := NewTExportTaskRequest()
+ if err := _field.Read(iprot); err != nil {
+ return err
}
+ p.Request = _field
+ return nil
+}
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceOpenScannerResult{}
- var retval *dorisexternalservice.TScanOpenResult_
- if retval, err2 = p.handler.OpenScanner(ctx, args.Params); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing open_scanner: "+err2.Error())
- oprot.WriteMessageBegin("open_scanner", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("open_scanner", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+func (p *BackendServiceSubmitExportTaskArgs) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("submit_export_task_args"); err != nil {
+ goto WriteStructBeginError
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
}
- if err != nil {
- return
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
}
- return true, err
-}
-
-type backendServiceProcessorGetNext struct {
- handler BackendService
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *backendServiceProcessorGetNext) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceGetNextArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("get_next", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
+func (p *BackendServiceSubmitExportTaskArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
}
-
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceGetNextResult{}
- var retval *dorisexternalservice.TScanBatchResult_
- if retval, err2 = p.handler.GetNext(ctx, args.Params); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_next: "+err2.Error())
- oprot.WriteMessageBegin("get_next", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
+ if err := p.Request.Write(oprot); err != nil {
+ return err
}
- if err2 = oprot.WriteMessageBegin("get_next", thrift.REPLY, seqId); err2 != nil {
- err = err2
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
}
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *BackendServiceSubmitExportTaskArgs) String() string {
+ if p == nil {
+ return ""
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ return fmt.Sprintf("BackendServiceSubmitExportTaskArgs(%+v)", *p)
+
+}
+
+func (p *BackendServiceSubmitExportTaskArgs) DeepEqual(ano *BackendServiceSubmitExportTaskArgs) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+ if !p.Field1DeepEqual(ano.Request) {
+ return false
}
- if err != nil {
- return
+ return true
+}
+
+func (p *BackendServiceSubmitExportTaskArgs) Field1DeepEqual(src *TExportTaskRequest) bool {
+
+ if !p.Request.DeepEqual(src) {
+ return false
}
- return true, err
+ return true
}
-type backendServiceProcessorCloseScanner struct {
- handler BackendService
+type BackendServiceSubmitExportTaskResult struct {
+ Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"`
}
-func (p *backendServiceProcessorCloseScanner) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceCloseScannerArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("close_scanner", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
- }
+func NewBackendServiceSubmitExportTaskResult() *BackendServiceSubmitExportTaskResult {
+ return &BackendServiceSubmitExportTaskResult{}
+}
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceCloseScannerResult{}
- var retval *dorisexternalservice.TScanCloseResult_
- if retval, err2 = p.handler.CloseScanner(ctx, args.Params); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing close_scanner: "+err2.Error())
- oprot.WriteMessageBegin("close_scanner", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("close_scanner", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
- }
- if err != nil {
- return
+func (p *BackendServiceSubmitExportTaskResult) InitDefault() {
+}
+
+var BackendServiceSubmitExportTaskResult_Success_DEFAULT *status.TStatus
+
+func (p *BackendServiceSubmitExportTaskResult) GetSuccess() (v *status.TStatus) {
+ if !p.IsSetSuccess() {
+ return BackendServiceSubmitExportTaskResult_Success_DEFAULT
}
- return true, err
+ return p.Success
+}
+func (p *BackendServiceSubmitExportTaskResult) SetSuccess(x interface{}) {
+ p.Success = x.(*status.TStatus)
}
-type backendServiceProcessorGetStreamLoadRecord struct {
- handler BackendService
+var fieldIDToName_BackendServiceSubmitExportTaskResult = map[int16]string{
+ 0: "success",
}
-func (p *backendServiceProcessorGetStreamLoadRecord) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceGetStreamLoadRecordArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("get_stream_load_record", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
- }
+func (p *BackendServiceSubmitExportTaskResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceGetStreamLoadRecordResult{}
- var retval *TStreamLoadRecordResult_
- if retval, err2 = p.handler.GetStreamLoadRecord(ctx, args.LastStreamRecordTime); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing get_stream_load_record: "+err2.Error())
- oprot.WriteMessageBegin("get_stream_load_record", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("get_stream_load_record", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
- }
- if err != nil {
- return
+func (p *BackendServiceSubmitExportTaskResult) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
}
- return true, err
-}
-type backendServiceProcessorCleanTrash struct {
- handler BackendService
-}
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
-func (p *backendServiceProcessorCleanTrash) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceCleanTrashArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- return false, err
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField0(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
}
-
- iprot.ReadMessageEnd()
- var err2 error
- if err2 = p.handler.CleanTrash(ctx); err2 != nil {
- return true, err2
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
}
- return true, nil
-}
-type backendServiceProcessorCheckStorageFormat struct {
- handler BackendService
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskResult[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *backendServiceProcessorCheckStorageFormat) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceCheckStorageFormatArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("check_storage_format", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
+func (p *BackendServiceSubmitExportTaskResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := status.NewTStatus()
+ if err := _field.Read(iprot); err != nil {
+ return err
}
+ p.Success = _field
+ return nil
+}
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceCheckStorageFormatResult{}
- var retval *TCheckStorageFormatResult_
- if retval, err2 = p.handler.CheckStorageFormat(ctx); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing check_storage_format: "+err2.Error())
- oprot.WriteMessageBegin("check_storage_format", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("check_storage_format", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
+func (p *BackendServiceSubmitExportTaskResult) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("submit_export_task_result"); err != nil {
+ goto WriteStructBeginError
}
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+ if p != nil {
+ if err = p.writeField0(oprot); err != nil {
+ fieldId = 0
+ goto WriteFieldError
+ }
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
}
- if err != nil {
- return
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
}
- return true, err
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-type backendServiceProcessorIngestBinlog struct {
- handler BackendService
+func (p *BackendServiceSubmitExportTaskResult) writeField0(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Success.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *backendServiceProcessorIngestBinlog) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := BackendServiceIngestBinlogArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("ingest_binlog", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return false, err
+func (p *BackendServiceSubmitExportTaskResult) String() string {
+ if p == nil {
+ return ""
}
+ return fmt.Sprintf("BackendServiceSubmitExportTaskResult(%+v)", *p)
- iprot.ReadMessageEnd()
- var err2 error
- result := BackendServiceIngestBinlogResult{}
- var retval *TIngestBinlogResult_
- if retval, err2 = p.handler.IngestBinlog(ctx, args.IngestBinlogRequest); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing ingest_binlog: "+err2.Error())
- oprot.WriteMessageBegin("ingest_binlog", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush(ctx)
- return true, err2
- } else {
- result.Success = retval
- }
- if err2 = oprot.WriteMessageBegin("ingest_binlog", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
+}
+
+func (p *BackendServiceSubmitExportTaskResult) DeepEqual(ano *BackendServiceSubmitExportTaskResult) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
}
- if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
- err = err2
+ if !p.Field0DeepEqual(ano.Success) {
+ return false
}
- if err != nil {
- return
+ return true
+}
+
+func (p *BackendServiceSubmitExportTaskResult) Field0DeepEqual(src *status.TStatus) bool {
+
+ if !p.Success.DeepEqual(src) {
+ return false
}
- return true, err
+ return true
}
-type BackendServiceExecPlanFragmentArgs struct {
- Params *palointernalservice.TExecPlanFragmentParams `thrift:"params,1" frugal:"1,default,palointernalservice.TExecPlanFragmentParams" json:"params"`
+type BackendServiceGetExportStatusArgs struct {
+ TaskId *types.TUniqueId `thrift:"task_id,1" frugal:"1,default,types.TUniqueId" json:"task_id"`
}
-func NewBackendServiceExecPlanFragmentArgs() *BackendServiceExecPlanFragmentArgs {
- return &BackendServiceExecPlanFragmentArgs{}
+func NewBackendServiceGetExportStatusArgs() *BackendServiceGetExportStatusArgs {
+ return &BackendServiceGetExportStatusArgs{}
}
-func (p *BackendServiceExecPlanFragmentArgs) InitDefault() {
- *p = BackendServiceExecPlanFragmentArgs{}
+func (p *BackendServiceGetExportStatusArgs) InitDefault() {
}
-var BackendServiceExecPlanFragmentArgs_Params_DEFAULT *palointernalservice.TExecPlanFragmentParams
+var BackendServiceGetExportStatusArgs_TaskId_DEFAULT *types.TUniqueId
-func (p *BackendServiceExecPlanFragmentArgs) GetParams() (v *palointernalservice.TExecPlanFragmentParams) {
- if !p.IsSetParams() {
- return BackendServiceExecPlanFragmentArgs_Params_DEFAULT
+func (p *BackendServiceGetExportStatusArgs) GetTaskId() (v *types.TUniqueId) {
+ if !p.IsSetTaskId() {
+ return BackendServiceGetExportStatusArgs_TaskId_DEFAULT
}
- return p.Params
+ return p.TaskId
}
-func (p *BackendServiceExecPlanFragmentArgs) SetParams(val *palointernalservice.TExecPlanFragmentParams) {
- p.Params = val
+func (p *BackendServiceGetExportStatusArgs) SetTaskId(val *types.TUniqueId) {
+ p.TaskId = val
}
-var fieldIDToName_BackendServiceExecPlanFragmentArgs = map[int16]string{
- 1: "params",
+var fieldIDToName_BackendServiceGetExportStatusArgs = map[int16]string{
+ 1: "task_id",
}
-func (p *BackendServiceExecPlanFragmentArgs) IsSetParams() bool {
- return p.Params != nil
+func (p *BackendServiceGetExportStatusArgs) IsSetTaskId() bool {
+ return p.TaskId != nil
}
-func (p *BackendServiceExecPlanFragmentArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetExportStatusArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -7912,17 +19904,14 @@ func (p *BackendServiceExecPlanFragmentArgs) Read(iprot thrift.TProtocol) (err e
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -7937,7 +19926,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentArgs[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusArgs[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -7947,17 +19936,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceExecPlanFragmentArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Params = palointernalservice.NewTExecPlanFragmentParams()
- if err := p.Params.Read(iprot); err != nil {
+func (p *BackendServiceGetExportStatusArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := types.NewTUniqueId()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.TaskId = _field
return nil
}
-func (p *BackendServiceExecPlanFragmentArgs) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetExportStatusArgs) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("exec_plan_fragment_args"); err != nil {
+ if err = oprot.WriteStructBegin("get_export_status_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -7965,7 +19955,6 @@ func (p *BackendServiceExecPlanFragmentArgs) Write(oprot thrift.TProtocol) (err
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -7984,11 +19973,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceExecPlanFragmentArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil {
+func (p *BackendServiceGetExportStatusArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("task_id", thrift.STRUCT, 1); err != nil {
goto WriteFieldBeginError
}
- if err := p.Params.Write(oprot); err != nil {
+ if err := p.TaskId.Write(oprot); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -8001,66 +19990,66 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServiceExecPlanFragmentArgs) String() string {
+func (p *BackendServiceGetExportStatusArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceExecPlanFragmentArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetExportStatusArgs(%+v)", *p)
+
}
-func (p *BackendServiceExecPlanFragmentArgs) DeepEqual(ano *BackendServiceExecPlanFragmentArgs) bool {
+func (p *BackendServiceGetExportStatusArgs) DeepEqual(ano *BackendServiceGetExportStatusArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.Params) {
+ if !p.Field1DeepEqual(ano.TaskId) {
return false
}
return true
}
-func (p *BackendServiceExecPlanFragmentArgs) Field1DeepEqual(src *palointernalservice.TExecPlanFragmentParams) bool {
+func (p *BackendServiceGetExportStatusArgs) Field1DeepEqual(src *types.TUniqueId) bool {
- if !p.Params.DeepEqual(src) {
+ if !p.TaskId.DeepEqual(src) {
return false
}
return true
}
-type BackendServiceExecPlanFragmentResult struct {
- Success *palointernalservice.TExecPlanFragmentResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TExecPlanFragmentResult_" json:"success,omitempty"`
+type BackendServiceGetExportStatusResult struct {
+ Success *palointernalservice.TExportStatusResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TExportStatusResult_" json:"success,omitempty"`
}
-func NewBackendServiceExecPlanFragmentResult() *BackendServiceExecPlanFragmentResult {
- return &BackendServiceExecPlanFragmentResult{}
+func NewBackendServiceGetExportStatusResult() *BackendServiceGetExportStatusResult {
+ return &BackendServiceGetExportStatusResult{}
}
-func (p *BackendServiceExecPlanFragmentResult) InitDefault() {
- *p = BackendServiceExecPlanFragmentResult{}
+func (p *BackendServiceGetExportStatusResult) InitDefault() {
}
-var BackendServiceExecPlanFragmentResult_Success_DEFAULT *palointernalservice.TExecPlanFragmentResult_
+var BackendServiceGetExportStatusResult_Success_DEFAULT *palointernalservice.TExportStatusResult_
-func (p *BackendServiceExecPlanFragmentResult) GetSuccess() (v *palointernalservice.TExecPlanFragmentResult_) {
+func (p *BackendServiceGetExportStatusResult) GetSuccess() (v *palointernalservice.TExportStatusResult_) {
if !p.IsSetSuccess() {
- return BackendServiceExecPlanFragmentResult_Success_DEFAULT
+ return BackendServiceGetExportStatusResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceExecPlanFragmentResult) SetSuccess(x interface{}) {
- p.Success = x.(*palointernalservice.TExecPlanFragmentResult_)
+func (p *BackendServiceGetExportStatusResult) SetSuccess(x interface{}) {
+ p.Success = x.(*palointernalservice.TExportStatusResult_)
}
-var fieldIDToName_BackendServiceExecPlanFragmentResult = map[int16]string{
+var fieldIDToName_BackendServiceGetExportStatusResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceExecPlanFragmentResult) IsSetSuccess() bool {
+func (p *BackendServiceGetExportStatusResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceExecPlanFragmentResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetExportStatusResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -8084,17 +20073,14 @@ func (p *BackendServiceExecPlanFragmentResult) Read(iprot thrift.TProtocol) (err
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -8109,7 +20095,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -8119,17 +20105,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceExecPlanFragmentResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = palointernalservice.NewTExecPlanFragmentResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceGetExportStatusResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := palointernalservice.NewTExportStatusResult_()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceExecPlanFragmentResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetExportStatusResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("exec_plan_fragment_result"); err != nil {
+ if err = oprot.WriteStructBegin("get_export_status_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -8137,7 +20124,6 @@ func (p *BackendServiceExecPlanFragmentResult) Write(oprot thrift.TProtocol) (er
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -8156,7 +20142,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceExecPlanFragmentResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetExportStatusResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -8175,14 +20161,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceExecPlanFragmentResult) String() string {
+func (p *BackendServiceGetExportStatusResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceExecPlanFragmentResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetExportStatusResult(%+v)", *p)
+
}
-func (p *BackendServiceExecPlanFragmentResult) DeepEqual(ano *BackendServiceExecPlanFragmentResult) bool {
+func (p *BackendServiceGetExportStatusResult) DeepEqual(ano *BackendServiceGetExportStatusResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -8194,7 +20181,7 @@ func (p *BackendServiceExecPlanFragmentResult) DeepEqual(ano *BackendServiceExec
return true
}
-func (p *BackendServiceExecPlanFragmentResult) Field0DeepEqual(src *palointernalservice.TExecPlanFragmentResult_) bool {
+func (p *BackendServiceGetExportStatusResult) Field0DeepEqual(src *palointernalservice.TExportStatusResult_) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -8202,39 +20189,38 @@ func (p *BackendServiceExecPlanFragmentResult) Field0DeepEqual(src *palointernal
return true
}
-type BackendServiceCancelPlanFragmentArgs struct {
- Params *palointernalservice.TCancelPlanFragmentParams `thrift:"params,1" frugal:"1,default,palointernalservice.TCancelPlanFragmentParams" json:"params"`
+type BackendServiceEraseExportTaskArgs struct {
+ TaskId *types.TUniqueId `thrift:"task_id,1" frugal:"1,default,types.TUniqueId" json:"task_id"`
}
-func NewBackendServiceCancelPlanFragmentArgs() *BackendServiceCancelPlanFragmentArgs {
- return &BackendServiceCancelPlanFragmentArgs{}
+func NewBackendServiceEraseExportTaskArgs() *BackendServiceEraseExportTaskArgs {
+ return &BackendServiceEraseExportTaskArgs{}
}
-func (p *BackendServiceCancelPlanFragmentArgs) InitDefault() {
- *p = BackendServiceCancelPlanFragmentArgs{}
+func (p *BackendServiceEraseExportTaskArgs) InitDefault() {
}
-var BackendServiceCancelPlanFragmentArgs_Params_DEFAULT *palointernalservice.TCancelPlanFragmentParams
+var BackendServiceEraseExportTaskArgs_TaskId_DEFAULT *types.TUniqueId
-func (p *BackendServiceCancelPlanFragmentArgs) GetParams() (v *palointernalservice.TCancelPlanFragmentParams) {
- if !p.IsSetParams() {
- return BackendServiceCancelPlanFragmentArgs_Params_DEFAULT
+func (p *BackendServiceEraseExportTaskArgs) GetTaskId() (v *types.TUniqueId) {
+ if !p.IsSetTaskId() {
+ return BackendServiceEraseExportTaskArgs_TaskId_DEFAULT
}
- return p.Params
+ return p.TaskId
}
-func (p *BackendServiceCancelPlanFragmentArgs) SetParams(val *palointernalservice.TCancelPlanFragmentParams) {
- p.Params = val
+func (p *BackendServiceEraseExportTaskArgs) SetTaskId(val *types.TUniqueId) {
+ p.TaskId = val
}
-var fieldIDToName_BackendServiceCancelPlanFragmentArgs = map[int16]string{
- 1: "params",
+var fieldIDToName_BackendServiceEraseExportTaskArgs = map[int16]string{
+ 1: "task_id",
}
-func (p *BackendServiceCancelPlanFragmentArgs) IsSetParams() bool {
- return p.Params != nil
+func (p *BackendServiceEraseExportTaskArgs) IsSetTaskId() bool {
+ return p.TaskId != nil
}
-func (p *BackendServiceCancelPlanFragmentArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceEraseExportTaskArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -8258,17 +20244,14 @@ func (p *BackendServiceCancelPlanFragmentArgs) Read(iprot thrift.TProtocol) (err
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -8283,7 +20266,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentArgs[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskArgs[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -8293,17 +20276,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceCancelPlanFragmentArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Params = palointernalservice.NewTCancelPlanFragmentParams()
- if err := p.Params.Read(iprot); err != nil {
+func (p *BackendServiceEraseExportTaskArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := types.NewTUniqueId()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.TaskId = _field
return nil
}
-func (p *BackendServiceCancelPlanFragmentArgs) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceEraseExportTaskArgs) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("cancel_plan_fragment_args"); err != nil {
+ if err = oprot.WriteStructBegin("erase_export_task_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -8311,7 +20295,6 @@ func (p *BackendServiceCancelPlanFragmentArgs) Write(oprot thrift.TProtocol) (er
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -8330,11 +20313,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceCancelPlanFragmentArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil {
+func (p *BackendServiceEraseExportTaskArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("task_id", thrift.STRUCT, 1); err != nil {
goto WriteFieldBeginError
}
- if err := p.Params.Write(oprot); err != nil {
+ if err := p.TaskId.Write(oprot); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -8347,66 +20330,66 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServiceCancelPlanFragmentArgs) String() string {
+func (p *BackendServiceEraseExportTaskArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceCancelPlanFragmentArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceEraseExportTaskArgs(%+v)", *p)
+
}
-func (p *BackendServiceCancelPlanFragmentArgs) DeepEqual(ano *BackendServiceCancelPlanFragmentArgs) bool {
+func (p *BackendServiceEraseExportTaskArgs) DeepEqual(ano *BackendServiceEraseExportTaskArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.Params) {
+ if !p.Field1DeepEqual(ano.TaskId) {
return false
}
return true
}
-func (p *BackendServiceCancelPlanFragmentArgs) Field1DeepEqual(src *palointernalservice.TCancelPlanFragmentParams) bool {
+func (p *BackendServiceEraseExportTaskArgs) Field1DeepEqual(src *types.TUniqueId) bool {
- if !p.Params.DeepEqual(src) {
+ if !p.TaskId.DeepEqual(src) {
return false
}
return true
}
-type BackendServiceCancelPlanFragmentResult struct {
- Success *palointernalservice.TCancelPlanFragmentResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TCancelPlanFragmentResult_" json:"success,omitempty"`
+type BackendServiceEraseExportTaskResult struct {
+ Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"`
}
-func NewBackendServiceCancelPlanFragmentResult() *BackendServiceCancelPlanFragmentResult {
- return &BackendServiceCancelPlanFragmentResult{}
+func NewBackendServiceEraseExportTaskResult() *BackendServiceEraseExportTaskResult {
+ return &BackendServiceEraseExportTaskResult{}
}
-func (p *BackendServiceCancelPlanFragmentResult) InitDefault() {
- *p = BackendServiceCancelPlanFragmentResult{}
+func (p *BackendServiceEraseExportTaskResult) InitDefault() {
}
-var BackendServiceCancelPlanFragmentResult_Success_DEFAULT *palointernalservice.TCancelPlanFragmentResult_
+var BackendServiceEraseExportTaskResult_Success_DEFAULT *status.TStatus
-func (p *BackendServiceCancelPlanFragmentResult) GetSuccess() (v *palointernalservice.TCancelPlanFragmentResult_) {
+func (p *BackendServiceEraseExportTaskResult) GetSuccess() (v *status.TStatus) {
if !p.IsSetSuccess() {
- return BackendServiceCancelPlanFragmentResult_Success_DEFAULT
+ return BackendServiceEraseExportTaskResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceCancelPlanFragmentResult) SetSuccess(x interface{}) {
- p.Success = x.(*palointernalservice.TCancelPlanFragmentResult_)
+func (p *BackendServiceEraseExportTaskResult) SetSuccess(x interface{}) {
+ p.Success = x.(*status.TStatus)
}
-var fieldIDToName_BackendServiceCancelPlanFragmentResult = map[int16]string{
+var fieldIDToName_BackendServiceEraseExportTaskResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceCancelPlanFragmentResult) IsSetSuccess() bool {
+func (p *BackendServiceEraseExportTaskResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceCancelPlanFragmentResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceEraseExportTaskResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -8430,17 +20413,14 @@ func (p *BackendServiceCancelPlanFragmentResult) Read(iprot thrift.TProtocol) (e
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -8455,7 +20435,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -8465,17 +20445,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceCancelPlanFragmentResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = palointernalservice.NewTCancelPlanFragmentResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceEraseExportTaskResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := status.NewTStatus()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceCancelPlanFragmentResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceEraseExportTaskResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("cancel_plan_fragment_result"); err != nil {
+ if err = oprot.WriteStructBegin("erase_export_task_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -8483,7 +20464,6 @@ func (p *BackendServiceCancelPlanFragmentResult) Write(oprot thrift.TProtocol) (
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -8502,7 +20482,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceCancelPlanFragmentResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceEraseExportTaskResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -8521,14 +20501,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceCancelPlanFragmentResult) String() string {
+func (p *BackendServiceEraseExportTaskResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceCancelPlanFragmentResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceEraseExportTaskResult(%+v)", *p)
+
}
-func (p *BackendServiceCancelPlanFragmentResult) DeepEqual(ano *BackendServiceCancelPlanFragmentResult) bool {
+func (p *BackendServiceEraseExportTaskResult) DeepEqual(ano *BackendServiceEraseExportTaskResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -8540,7 +20521,7 @@ func (p *BackendServiceCancelPlanFragmentResult) DeepEqual(ano *BackendServiceCa
return true
}
-func (p *BackendServiceCancelPlanFragmentResult) Field0DeepEqual(src *palointernalservice.TCancelPlanFragmentResult_) bool {
+func (p *BackendServiceEraseExportTaskResult) Field0DeepEqual(src *status.TStatus) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -8548,39 +20529,19 @@ func (p *BackendServiceCancelPlanFragmentResult) Field0DeepEqual(src *palointern
return true
}
-type BackendServiceTransmitDataArgs struct {
- Params *palointernalservice.TTransmitDataParams `thrift:"params,1" frugal:"1,default,palointernalservice.TTransmitDataParams" json:"params"`
-}
-
-func NewBackendServiceTransmitDataArgs() *BackendServiceTransmitDataArgs {
- return &BackendServiceTransmitDataArgs{}
-}
-
-func (p *BackendServiceTransmitDataArgs) InitDefault() {
- *p = BackendServiceTransmitDataArgs{}
+type BackendServiceGetTabletStatArgs struct {
}
-var BackendServiceTransmitDataArgs_Params_DEFAULT *palointernalservice.TTransmitDataParams
-
-func (p *BackendServiceTransmitDataArgs) GetParams() (v *palointernalservice.TTransmitDataParams) {
- if !p.IsSetParams() {
- return BackendServiceTransmitDataArgs_Params_DEFAULT
- }
- return p.Params
-}
-func (p *BackendServiceTransmitDataArgs) SetParams(val *palointernalservice.TTransmitDataParams) {
- p.Params = val
+func NewBackendServiceGetTabletStatArgs() *BackendServiceGetTabletStatArgs {
+ return &BackendServiceGetTabletStatArgs{}
}
-var fieldIDToName_BackendServiceTransmitDataArgs = map[int16]string{
- 1: "params",
+func (p *BackendServiceGetTabletStatArgs) InitDefault() {
}
-func (p *BackendServiceTransmitDataArgs) IsSetParams() bool {
- return p.Params != nil
-}
+var fieldIDToName_BackendServiceGetTabletStatArgs = map[int16]string{}
-func (p *BackendServiceTransmitDataArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetTabletStatArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -8597,24 +20558,9 @@ func (p *BackendServiceTransmitDataArgs) Read(iprot thrift.TProtocol) (err error
if fieldTypeId == thrift.STOP {
break
}
-
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRUCT {
- if err = p.ReadField1(iprot); err != nil {
- goto ReadFieldError
- }
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- default:
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldTypeError
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -8628,10 +20574,8 @@ ReadStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataArgs[fieldId]), err)
-SkipFieldError:
- return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+SkipFieldTypeError:
+ return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
ReadFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
@@ -8639,25 +20583,11 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceTransmitDataArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Params = palointernalservice.NewTTransmitDataParams()
- if err := p.Params.Read(iprot); err != nil {
- return err
- }
- return nil
-}
-
-func (p *BackendServiceTransmitDataArgs) Write(oprot thrift.TProtocol) (err error) {
- var fieldId int16
- if err = oprot.WriteStructBegin("transmit_data_args"); err != nil {
+func (p *BackendServiceGetTabletStatArgs) Write(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteStructBegin("get_tablet_stat_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
- if err = p.writeField1(oprot); err != nil {
- fieldId = 1
- goto WriteFieldError
- }
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -8668,91 +20598,61 @@ func (p *BackendServiceTransmitDataArgs) Write(oprot thrift.TProtocol) (err erro
return nil
WriteStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-WriteFieldError:
- return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
WriteFieldStopError:
- return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
-WriteStructEndError:
- return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
-}
-
-func (p *BackendServiceTransmitDataArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil {
- goto WriteFieldBeginError
- }
- if err := p.Params.Write(oprot); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceTransmitDataArgs) String() string {
+func (p *BackendServiceGetTabletStatArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceTransmitDataArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetTabletStatArgs(%+v)", *p)
+
}
-func (p *BackendServiceTransmitDataArgs) DeepEqual(ano *BackendServiceTransmitDataArgs) bool {
+func (p *BackendServiceGetTabletStatArgs) DeepEqual(ano *BackendServiceGetTabletStatArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.Params) {
- return false
- }
- return true
-}
-
-func (p *BackendServiceTransmitDataArgs) Field1DeepEqual(src *palointernalservice.TTransmitDataParams) bool {
-
- if !p.Params.DeepEqual(src) {
- return false
- }
return true
}
-type BackendServiceTransmitDataResult struct {
- Success *palointernalservice.TTransmitDataResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TTransmitDataResult_" json:"success,omitempty"`
+type BackendServiceGetTabletStatResult struct {
+ Success *TTabletStatResult_ `thrift:"success,0,optional" frugal:"0,optional,TTabletStatResult_" json:"success,omitempty"`
}
-func NewBackendServiceTransmitDataResult() *BackendServiceTransmitDataResult {
- return &BackendServiceTransmitDataResult{}
+func NewBackendServiceGetTabletStatResult() *BackendServiceGetTabletStatResult {
+ return &BackendServiceGetTabletStatResult{}
}
-func (p *BackendServiceTransmitDataResult) InitDefault() {
- *p = BackendServiceTransmitDataResult{}
+func (p *BackendServiceGetTabletStatResult) InitDefault() {
}
-var BackendServiceTransmitDataResult_Success_DEFAULT *palointernalservice.TTransmitDataResult_
+var BackendServiceGetTabletStatResult_Success_DEFAULT *TTabletStatResult_
-func (p *BackendServiceTransmitDataResult) GetSuccess() (v *palointernalservice.TTransmitDataResult_) {
+func (p *BackendServiceGetTabletStatResult) GetSuccess() (v *TTabletStatResult_) {
if !p.IsSetSuccess() {
- return BackendServiceTransmitDataResult_Success_DEFAULT
+ return BackendServiceGetTabletStatResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceTransmitDataResult) SetSuccess(x interface{}) {
- p.Success = x.(*palointernalservice.TTransmitDataResult_)
+func (p *BackendServiceGetTabletStatResult) SetSuccess(x interface{}) {
+ p.Success = x.(*TTabletStatResult_)
}
-var fieldIDToName_BackendServiceTransmitDataResult = map[int16]string{
+var fieldIDToName_BackendServiceGetTabletStatResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceTransmitDataResult) IsSetSuccess() bool {
+func (p *BackendServiceGetTabletStatResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceTransmitDataResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetTabletStatResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -8776,17 +20676,14 @@ func (p *BackendServiceTransmitDataResult) Read(iprot thrift.TProtocol) (err err
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -8801,7 +20698,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTabletStatResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -8811,17 +20708,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceTransmitDataResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = palointernalservice.NewTTransmitDataResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceGetTabletStatResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := NewTTabletStatResult_()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceTransmitDataResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetTabletStatResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("transmit_data_result"); err != nil {
+ if err = oprot.WriteStructBegin("get_tablet_stat_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -8829,7 +20727,6 @@ func (p *BackendServiceTransmitDataResult) Write(oprot thrift.TProtocol) (err er
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -8848,7 +20745,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceTransmitDataResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetTabletStatResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -8867,14 +20764,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceTransmitDataResult) String() string {
+func (p *BackendServiceGetTabletStatResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceTransmitDataResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetTabletStatResult(%+v)", *p)
+
}
-func (p *BackendServiceTransmitDataResult) DeepEqual(ano *BackendServiceTransmitDataResult) bool {
+func (p *BackendServiceGetTabletStatResult) DeepEqual(ano *BackendServiceGetTabletStatResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -8886,7 +20784,7 @@ func (p *BackendServiceTransmitDataResult) DeepEqual(ano *BackendServiceTransmit
return true
}
-func (p *BackendServiceTransmitDataResult) Field0DeepEqual(src *palointernalservice.TTransmitDataResult_) bool {
+func (p *BackendServiceGetTabletStatResult) Field0DeepEqual(src *TTabletStatResult_) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -8894,30 +20792,19 @@ func (p *BackendServiceTransmitDataResult) Field0DeepEqual(src *palointernalserv
return true
}
-type BackendServiceSubmitTasksArgs struct {
- Tasks []*agentservice.TAgentTaskRequest `thrift:"tasks,1" frugal:"1,default,list" json:"tasks"`
-}
-
-func NewBackendServiceSubmitTasksArgs() *BackendServiceSubmitTasksArgs {
- return &BackendServiceSubmitTasksArgs{}
+type BackendServiceGetTrashUsedCapacityArgs struct {
}
-func (p *BackendServiceSubmitTasksArgs) InitDefault() {
- *p = BackendServiceSubmitTasksArgs{}
+func NewBackendServiceGetTrashUsedCapacityArgs() *BackendServiceGetTrashUsedCapacityArgs {
+ return &BackendServiceGetTrashUsedCapacityArgs{}
}
-func (p *BackendServiceSubmitTasksArgs) GetTasks() (v []*agentservice.TAgentTaskRequest) {
- return p.Tasks
-}
-func (p *BackendServiceSubmitTasksArgs) SetTasks(val []*agentservice.TAgentTaskRequest) {
- p.Tasks = val
+func (p *BackendServiceGetTrashUsedCapacityArgs) InitDefault() {
}
-var fieldIDToName_BackendServiceSubmitTasksArgs = map[int16]string{
- 1: "tasks",
-}
+var fieldIDToName_BackendServiceGetTrashUsedCapacityArgs = map[int16]string{}
-func (p *BackendServiceSubmitTasksArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetTrashUsedCapacityArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -8934,24 +20821,9 @@ func (p *BackendServiceSubmitTasksArgs) Read(iprot thrift.TProtocol) (err error)
if fieldTypeId == thrift.STOP {
break
}
-
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.LIST {
- if err = p.ReadField1(iprot); err != nil {
- goto ReadFieldError
- }
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- default:
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldTypeError
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -8965,10 +20837,8 @@ ReadStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksArgs[fieldId]), err)
-SkipFieldError:
- return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+SkipFieldTypeError:
+ return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
ReadFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
@@ -8976,37 +20846,11 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceSubmitTasksArgs) ReadField1(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return err
- }
- p.Tasks = make([]*agentservice.TAgentTaskRequest, 0, size)
- for i := 0; i < size; i++ {
- _elem := agentservice.NewTAgentTaskRequest()
- if err := _elem.Read(iprot); err != nil {
- return err
- }
-
- p.Tasks = append(p.Tasks, _elem)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return err
- }
- return nil
-}
-
-func (p *BackendServiceSubmitTasksArgs) Write(oprot thrift.TProtocol) (err error) {
- var fieldId int16
- if err = oprot.WriteStructBegin("submit_tasks_args"); err != nil {
+func (p *BackendServiceGetTrashUsedCapacityArgs) Write(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteStructBegin("get_trash_used_capacity_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
- if err = p.writeField1(oprot); err != nil {
- fieldId = 1
- goto WriteFieldError
- }
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -9017,105 +20861,61 @@ func (p *BackendServiceSubmitTasksArgs) Write(oprot thrift.TProtocol) (err error
return nil
WriteStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-WriteFieldError:
- return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
WriteFieldStopError:
return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceSubmitTasksArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("tasks", thrift.LIST, 1); err != nil {
- goto WriteFieldBeginError
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tasks)); err != nil {
- return err
- }
- for _, v := range p.Tasks {
- if err := v.Write(oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
-}
-
-func (p *BackendServiceSubmitTasksArgs) String() string {
+func (p *BackendServiceGetTrashUsedCapacityArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceSubmitTasksArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetTrashUsedCapacityArgs(%+v)", *p)
+
}
-func (p *BackendServiceSubmitTasksArgs) DeepEqual(ano *BackendServiceSubmitTasksArgs) bool {
+func (p *BackendServiceGetTrashUsedCapacityArgs) DeepEqual(ano *BackendServiceGetTrashUsedCapacityArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.Tasks) {
- return false
- }
- return true
-}
-
-func (p *BackendServiceSubmitTasksArgs) Field1DeepEqual(src []*agentservice.TAgentTaskRequest) bool {
-
- if len(p.Tasks) != len(src) {
- return false
- }
- for i, v := range p.Tasks {
- _src := src[i]
- if !v.DeepEqual(_src) {
- return false
- }
- }
return true
}
-type BackendServiceSubmitTasksResult struct {
- Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"`
+type BackendServiceGetTrashUsedCapacityResult struct {
+ Success *int64 `thrift:"success,0,optional" frugal:"0,optional,i64" json:"success,omitempty"`
}
-func NewBackendServiceSubmitTasksResult() *BackendServiceSubmitTasksResult {
- return &BackendServiceSubmitTasksResult{}
+func NewBackendServiceGetTrashUsedCapacityResult() *BackendServiceGetTrashUsedCapacityResult {
+ return &BackendServiceGetTrashUsedCapacityResult{}
}
-func (p *BackendServiceSubmitTasksResult) InitDefault() {
- *p = BackendServiceSubmitTasksResult{}
+func (p *BackendServiceGetTrashUsedCapacityResult) InitDefault() {
}
-var BackendServiceSubmitTasksResult_Success_DEFAULT *agentservice.TAgentResult_
+var BackendServiceGetTrashUsedCapacityResult_Success_DEFAULT int64
-func (p *BackendServiceSubmitTasksResult) GetSuccess() (v *agentservice.TAgentResult_) {
+func (p *BackendServiceGetTrashUsedCapacityResult) GetSuccess() (v int64) {
if !p.IsSetSuccess() {
- return BackendServiceSubmitTasksResult_Success_DEFAULT
+ return BackendServiceGetTrashUsedCapacityResult_Success_DEFAULT
}
- return p.Success
+ return *p.Success
}
-func (p *BackendServiceSubmitTasksResult) SetSuccess(x interface{}) {
- p.Success = x.(*agentservice.TAgentResult_)
+func (p *BackendServiceGetTrashUsedCapacityResult) SetSuccess(x interface{}) {
+ p.Success = x.(*int64)
}
-var fieldIDToName_BackendServiceSubmitTasksResult = map[int16]string{
+var fieldIDToName_BackendServiceGetTrashUsedCapacityResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceSubmitTasksResult) IsSetSuccess() bool {
+func (p *BackendServiceGetTrashUsedCapacityResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceSubmitTasksResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetTrashUsedCapacityResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -9135,21 +20935,18 @@ func (p *BackendServiceSubmitTasksResult) Read(iprot thrift.TProtocol) (err erro
switch fieldId {
case 0:
- if fieldTypeId == thrift.STRUCT {
+ if fieldTypeId == thrift.I64 {
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -9164,7 +20961,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTrashUsedCapacityResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -9174,17 +20971,21 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceSubmitTasksResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = agentservice.NewTAgentResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceGetTrashUsedCapacityResult) ReadField0(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
return err
+ } else {
+ _field = &v
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceSubmitTasksResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetTrashUsedCapacityResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("submit_tasks_result"); err != nil {
+ if err = oprot.WriteStructBegin("get_trash_used_capacity_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -9192,7 +20993,6 @@ func (p *BackendServiceSubmitTasksResult) Write(oprot thrift.TProtocol) (err err
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -9211,12 +21011,12 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceSubmitTasksResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetTrashUsedCapacityResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
- if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ if err = oprot.WriteFieldBegin("success", thrift.I64, 0); err != nil {
goto WriteFieldBeginError
}
- if err := p.Success.Write(oprot); err != nil {
+ if err := oprot.WriteI64(*p.Success); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -9230,14 +21030,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceSubmitTasksResult) String() string {
+func (p *BackendServiceGetTrashUsedCapacityResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceSubmitTasksResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetTrashUsedCapacityResult(%+v)", *p)
+
}
-func (p *BackendServiceSubmitTasksResult) DeepEqual(ano *BackendServiceSubmitTasksResult) bool {
+func (p *BackendServiceGetTrashUsedCapacityResult) DeepEqual(ano *BackendServiceGetTrashUsedCapacityResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -9249,47 +21050,32 @@ func (p *BackendServiceSubmitTasksResult) DeepEqual(ano *BackendServiceSubmitTas
return true
}
-func (p *BackendServiceSubmitTasksResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool {
+func (p *BackendServiceGetTrashUsedCapacityResult) Field0DeepEqual(src *int64) bool {
- if !p.Success.DeepEqual(src) {
+ if p.Success == src {
+ return true
+ } else if p.Success == nil || src == nil {
+ return false
+ }
+ if *p.Success != *src {
return false
}
return true
}
-type BackendServiceMakeSnapshotArgs struct {
- SnapshotRequest *agentservice.TSnapshotRequest `thrift:"snapshot_request,1" frugal:"1,default,agentservice.TSnapshotRequest" json:"snapshot_request"`
-}
-
-func NewBackendServiceMakeSnapshotArgs() *BackendServiceMakeSnapshotArgs {
- return &BackendServiceMakeSnapshotArgs{}
-}
-
-func (p *BackendServiceMakeSnapshotArgs) InitDefault() {
- *p = BackendServiceMakeSnapshotArgs{}
+type BackendServiceGetDiskTrashUsedCapacityArgs struct {
}
-var BackendServiceMakeSnapshotArgs_SnapshotRequest_DEFAULT *agentservice.TSnapshotRequest
-
-func (p *BackendServiceMakeSnapshotArgs) GetSnapshotRequest() (v *agentservice.TSnapshotRequest) {
- if !p.IsSetSnapshotRequest() {
- return BackendServiceMakeSnapshotArgs_SnapshotRequest_DEFAULT
- }
- return p.SnapshotRequest
-}
-func (p *BackendServiceMakeSnapshotArgs) SetSnapshotRequest(val *agentservice.TSnapshotRequest) {
- p.SnapshotRequest = val
+func NewBackendServiceGetDiskTrashUsedCapacityArgs() *BackendServiceGetDiskTrashUsedCapacityArgs {
+ return &BackendServiceGetDiskTrashUsedCapacityArgs{}
}
-var fieldIDToName_BackendServiceMakeSnapshotArgs = map[int16]string{
- 1: "snapshot_request",
+func (p *BackendServiceGetDiskTrashUsedCapacityArgs) InitDefault() {
}
-func (p *BackendServiceMakeSnapshotArgs) IsSetSnapshotRequest() bool {
- return p.SnapshotRequest != nil
-}
+var fieldIDToName_BackendServiceGetDiskTrashUsedCapacityArgs = map[int16]string{}
-func (p *BackendServiceMakeSnapshotArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetDiskTrashUsedCapacityArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -9306,24 +21092,9 @@ func (p *BackendServiceMakeSnapshotArgs) Read(iprot thrift.TProtocol) (err error
if fieldTypeId == thrift.STOP {
break
}
-
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRUCT {
- if err = p.ReadField1(iprot); err != nil {
- goto ReadFieldError
- }
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
- }
- default:
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldTypeError
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -9337,10 +21108,8 @@ ReadStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotArgs[fieldId]), err)
-SkipFieldError:
- return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+SkipFieldTypeError:
+ return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
ReadFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
@@ -9348,25 +21117,11 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceMakeSnapshotArgs) ReadField1(iprot thrift.TProtocol) error {
- p.SnapshotRequest = agentservice.NewTSnapshotRequest()
- if err := p.SnapshotRequest.Read(iprot); err != nil {
- return err
- }
- return nil
-}
-
-func (p *BackendServiceMakeSnapshotArgs) Write(oprot thrift.TProtocol) (err error) {
- var fieldId int16
- if err = oprot.WriteStructBegin("make_snapshot_args"); err != nil {
+func (p *BackendServiceGetDiskTrashUsedCapacityArgs) Write(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteStructBegin("get_disk_trash_used_capacity_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
- if err = p.writeField1(oprot); err != nil {
- fieldId = 1
- goto WriteFieldError
- }
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -9377,91 +21132,61 @@ func (p *BackendServiceMakeSnapshotArgs) Write(oprot thrift.TProtocol) (err erro
return nil
WriteStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-WriteFieldError:
- return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
WriteFieldStopError:
return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceMakeSnapshotArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("snapshot_request", thrift.STRUCT, 1); err != nil {
- goto WriteFieldBeginError
- }
- if err := p.SnapshotRequest.Write(oprot); err != nil {
- return err
- }
- if err = oprot.WriteFieldEnd(); err != nil {
- goto WriteFieldEndError
- }
- return nil
-WriteFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
-WriteFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
-}
-
-func (p *BackendServiceMakeSnapshotArgs) String() string {
+func (p *BackendServiceGetDiskTrashUsedCapacityArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceMakeSnapshotArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetDiskTrashUsedCapacityArgs(%+v)", *p)
+
}
-func (p *BackendServiceMakeSnapshotArgs) DeepEqual(ano *BackendServiceMakeSnapshotArgs) bool {
+func (p *BackendServiceGetDiskTrashUsedCapacityArgs) DeepEqual(ano *BackendServiceGetDiskTrashUsedCapacityArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.SnapshotRequest) {
- return false
- }
- return true
-}
-
-func (p *BackendServiceMakeSnapshotArgs) Field1DeepEqual(src *agentservice.TSnapshotRequest) bool {
-
- if !p.SnapshotRequest.DeepEqual(src) {
- return false
- }
return true
}
-type BackendServiceMakeSnapshotResult struct {
- Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"`
+type BackendServiceGetDiskTrashUsedCapacityResult struct {
+ Success []*TDiskTrashInfo `thrift:"success,0,optional" frugal:"0,optional,list" json:"success,omitempty"`
}
-func NewBackendServiceMakeSnapshotResult() *BackendServiceMakeSnapshotResult {
- return &BackendServiceMakeSnapshotResult{}
+func NewBackendServiceGetDiskTrashUsedCapacityResult() *BackendServiceGetDiskTrashUsedCapacityResult {
+ return &BackendServiceGetDiskTrashUsedCapacityResult{}
}
-func (p *BackendServiceMakeSnapshotResult) InitDefault() {
- *p = BackendServiceMakeSnapshotResult{}
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) InitDefault() {
}
-var BackendServiceMakeSnapshotResult_Success_DEFAULT *agentservice.TAgentResult_
+var BackendServiceGetDiskTrashUsedCapacityResult_Success_DEFAULT []*TDiskTrashInfo
-func (p *BackendServiceMakeSnapshotResult) GetSuccess() (v *agentservice.TAgentResult_) {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) GetSuccess() (v []*TDiskTrashInfo) {
if !p.IsSetSuccess() {
- return BackendServiceMakeSnapshotResult_Success_DEFAULT
+ return BackendServiceGetDiskTrashUsedCapacityResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceMakeSnapshotResult) SetSuccess(x interface{}) {
- p.Success = x.(*agentservice.TAgentResult_)
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) SetSuccess(x interface{}) {
+ p.Success = x.([]*TDiskTrashInfo)
}
-var fieldIDToName_BackendServiceMakeSnapshotResult = map[int16]string{
+var fieldIDToName_BackendServiceGetDiskTrashUsedCapacityResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceMakeSnapshotResult) IsSetSuccess() bool {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceMakeSnapshotResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -9481,21 +21206,18 @@ func (p *BackendServiceMakeSnapshotResult) Read(iprot thrift.TProtocol) (err err
switch fieldId {
case 0:
- if fieldTypeId == thrift.STRUCT {
+ if fieldTypeId == thrift.LIST {
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -9510,7 +21232,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetDiskTrashUsedCapacityResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -9520,17 +21242,33 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceMakeSnapshotResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = agentservice.NewTAgentResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) ReadField0(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]*TDiskTrashInfo, 0, size)
+ values := make([]TDiskTrashInfo, size)
+ for i := 0; i < size; i++ {
+ _elem := &values[i]
+ _elem.InitDefault()
+
+ if err := _elem.Read(iprot); err != nil {
+ return err
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceMakeSnapshotResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("make_snapshot_result"); err != nil {
+ if err = oprot.WriteStructBegin("get_disk_trash_used_capacity_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -9538,7 +21276,6 @@ func (p *BackendServiceMakeSnapshotResult) Write(oprot thrift.TProtocol) (err er
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -9557,12 +21294,20 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceMakeSnapshotResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
- if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
+ if err = oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
goto WriteFieldBeginError
}
- if err := p.Success.Write(oprot); err != nil {
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
+ return err
+ }
+ for _, v := range p.Success {
+ if err := v.Write(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -9576,14 +21321,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceMakeSnapshotResult) String() string {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceMakeSnapshotResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetDiskTrashUsedCapacityResult(%+v)", *p)
+
}
-func (p *BackendServiceMakeSnapshotResult) DeepEqual(ano *BackendServiceMakeSnapshotResult) bool {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) DeepEqual(ano *BackendServiceGetDiskTrashUsedCapacityResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -9595,38 +21341,43 @@ func (p *BackendServiceMakeSnapshotResult) DeepEqual(ano *BackendServiceMakeSnap
return true
}
-func (p *BackendServiceMakeSnapshotResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) Field0DeepEqual(src []*TDiskTrashInfo) bool {
- if !p.Success.DeepEqual(src) {
+ if len(p.Success) != len(src) {
return false
}
+ for i, v := range p.Success {
+ _src := src[i]
+ if !v.DeepEqual(_src) {
+ return false
+ }
+ }
return true
}
-type BackendServiceReleaseSnapshotArgs struct {
- SnapshotPath string `thrift:"snapshot_path,1" frugal:"1,default,string" json:"snapshot_path"`
+type BackendServiceSubmitRoutineLoadTaskArgs struct {
+ Tasks []*TRoutineLoadTask `thrift:"tasks,1" frugal:"1,default,list" json:"tasks"`
}
-func NewBackendServiceReleaseSnapshotArgs() *BackendServiceReleaseSnapshotArgs {
- return &BackendServiceReleaseSnapshotArgs{}
+func NewBackendServiceSubmitRoutineLoadTaskArgs() *BackendServiceSubmitRoutineLoadTaskArgs {
+ return &BackendServiceSubmitRoutineLoadTaskArgs{}
}
-func (p *BackendServiceReleaseSnapshotArgs) InitDefault() {
- *p = BackendServiceReleaseSnapshotArgs{}
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) InitDefault() {
}
-func (p *BackendServiceReleaseSnapshotArgs) GetSnapshotPath() (v string) {
- return p.SnapshotPath
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) GetTasks() (v []*TRoutineLoadTask) {
+ return p.Tasks
}
-func (p *BackendServiceReleaseSnapshotArgs) SetSnapshotPath(val string) {
- p.SnapshotPath = val
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) SetTasks(val []*TRoutineLoadTask) {
+ p.Tasks = val
}
-var fieldIDToName_BackendServiceReleaseSnapshotArgs = map[int16]string{
- 1: "snapshot_path",
+var fieldIDToName_BackendServiceSubmitRoutineLoadTaskArgs = map[int16]string{
+ 1: "tasks",
}
-func (p *BackendServiceReleaseSnapshotArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -9646,21 +21397,18 @@ func (p *BackendServiceReleaseSnapshotArgs) Read(iprot thrift.TProtocol) (err er
switch fieldId {
case 1:
- if fieldTypeId == thrift.STRING {
+ if fieldTypeId == thrift.LIST {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -9675,7 +21423,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotArgs[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskArgs[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -9685,18 +21433,33 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceReleaseSnapshotArgs) ReadField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) ReadField1(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
return err
- } else {
- p.SnapshotPath = v
}
+ _field := make([]*TRoutineLoadTask, 0, size)
+ values := make([]TRoutineLoadTask, size)
+ for i := 0; i < size; i++ {
+ _elem := &values[i]
+ _elem.InitDefault()
+
+ if err := _elem.Read(iprot); err != nil {
+ return err
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.Tasks = _field
return nil
}
-func (p *BackendServiceReleaseSnapshotArgs) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("release_snapshot_args"); err != nil {
+ if err = oprot.WriteStructBegin("submit_routine_load_task_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -9704,7 +21467,6 @@ func (p *BackendServiceReleaseSnapshotArgs) Write(oprot thrift.TProtocol) (err e
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -9723,11 +21485,19 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceReleaseSnapshotArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("snapshot_path", thrift.STRING, 1); err != nil {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("tasks", thrift.LIST, 1); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteString(p.SnapshotPath); err != nil {
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tasks)); err != nil {
+ return err
+ }
+ for _, v := range p.Tasks {
+ if err := v.Write(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -9740,66 +21510,72 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServiceReleaseSnapshotArgs) String() string {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceReleaseSnapshotArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceSubmitRoutineLoadTaskArgs(%+v)", *p)
+
}
-func (p *BackendServiceReleaseSnapshotArgs) DeepEqual(ano *BackendServiceReleaseSnapshotArgs) bool {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) DeepEqual(ano *BackendServiceSubmitRoutineLoadTaskArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.SnapshotPath) {
+ if !p.Field1DeepEqual(ano.Tasks) {
return false
}
return true
}
-func (p *BackendServiceReleaseSnapshotArgs) Field1DeepEqual(src string) bool {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) Field1DeepEqual(src []*TRoutineLoadTask) bool {
- if strings.Compare(p.SnapshotPath, src) != 0 {
+ if len(p.Tasks) != len(src) {
return false
}
+ for i, v := range p.Tasks {
+ _src := src[i]
+ if !v.DeepEqual(_src) {
+ return false
+ }
+ }
return true
}
-type BackendServiceReleaseSnapshotResult struct {
- Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"`
+type BackendServiceSubmitRoutineLoadTaskResult struct {
+ Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"`
}
-func NewBackendServiceReleaseSnapshotResult() *BackendServiceReleaseSnapshotResult {
- return &BackendServiceReleaseSnapshotResult{}
+func NewBackendServiceSubmitRoutineLoadTaskResult() *BackendServiceSubmitRoutineLoadTaskResult {
+ return &BackendServiceSubmitRoutineLoadTaskResult{}
}
-func (p *BackendServiceReleaseSnapshotResult) InitDefault() {
- *p = BackendServiceReleaseSnapshotResult{}
+func (p *BackendServiceSubmitRoutineLoadTaskResult) InitDefault() {
}
-var BackendServiceReleaseSnapshotResult_Success_DEFAULT *agentservice.TAgentResult_
+var BackendServiceSubmitRoutineLoadTaskResult_Success_DEFAULT *status.TStatus
-func (p *BackendServiceReleaseSnapshotResult) GetSuccess() (v *agentservice.TAgentResult_) {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) GetSuccess() (v *status.TStatus) {
if !p.IsSetSuccess() {
- return BackendServiceReleaseSnapshotResult_Success_DEFAULT
+ return BackendServiceSubmitRoutineLoadTaskResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceReleaseSnapshotResult) SetSuccess(x interface{}) {
- p.Success = x.(*agentservice.TAgentResult_)
+func (p *BackendServiceSubmitRoutineLoadTaskResult) SetSuccess(x interface{}) {
+ p.Success = x.(*status.TStatus)
}
-var fieldIDToName_BackendServiceReleaseSnapshotResult = map[int16]string{
+var fieldIDToName_BackendServiceSubmitRoutineLoadTaskResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceReleaseSnapshotResult) IsSetSuccess() bool {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceReleaseSnapshotResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -9823,17 +21599,14 @@ func (p *BackendServiceReleaseSnapshotResult) Read(iprot thrift.TProtocol) (err
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -9848,7 +21621,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -9858,17 +21631,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceReleaseSnapshotResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = agentservice.NewTAgentResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := status.NewTStatus()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceReleaseSnapshotResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("release_snapshot_result"); err != nil {
+ if err = oprot.WriteStructBegin("submit_routine_load_task_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -9876,7 +21650,6 @@ func (p *BackendServiceReleaseSnapshotResult) Write(oprot thrift.TProtocol) (err
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -9895,7 +21668,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceReleaseSnapshotResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -9914,14 +21687,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceReleaseSnapshotResult) String() string {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceReleaseSnapshotResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceSubmitRoutineLoadTaskResult(%+v)", *p)
+
}
-func (p *BackendServiceReleaseSnapshotResult) DeepEqual(ano *BackendServiceReleaseSnapshotResult) bool {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) DeepEqual(ano *BackendServiceSubmitRoutineLoadTaskResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -9933,7 +21707,7 @@ func (p *BackendServiceReleaseSnapshotResult) DeepEqual(ano *BackendServiceRelea
return true
}
-func (p *BackendServiceReleaseSnapshotResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) Field0DeepEqual(src *status.TStatus) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -9941,39 +21715,38 @@ func (p *BackendServiceReleaseSnapshotResult) Field0DeepEqual(src *agentservice.
return true
}
-type BackendServicePublishClusterStateArgs struct {
- Request *agentservice.TAgentPublishRequest `thrift:"request,1" frugal:"1,default,agentservice.TAgentPublishRequest" json:"request"`
+type BackendServiceOpenScannerArgs struct {
+ Params *dorisexternalservice.TScanOpenParams `thrift:"params,1" frugal:"1,default,dorisexternalservice.TScanOpenParams" json:"params"`
}
-func NewBackendServicePublishClusterStateArgs() *BackendServicePublishClusterStateArgs {
- return &BackendServicePublishClusterStateArgs{}
+func NewBackendServiceOpenScannerArgs() *BackendServiceOpenScannerArgs {
+ return &BackendServiceOpenScannerArgs{}
}
-func (p *BackendServicePublishClusterStateArgs) InitDefault() {
- *p = BackendServicePublishClusterStateArgs{}
+func (p *BackendServiceOpenScannerArgs) InitDefault() {
}
-var BackendServicePublishClusterStateArgs_Request_DEFAULT *agentservice.TAgentPublishRequest
+var BackendServiceOpenScannerArgs_Params_DEFAULT *dorisexternalservice.TScanOpenParams
-func (p *BackendServicePublishClusterStateArgs) GetRequest() (v *agentservice.TAgentPublishRequest) {
- if !p.IsSetRequest() {
- return BackendServicePublishClusterStateArgs_Request_DEFAULT
+func (p *BackendServiceOpenScannerArgs) GetParams() (v *dorisexternalservice.TScanOpenParams) {
+ if !p.IsSetParams() {
+ return BackendServiceOpenScannerArgs_Params_DEFAULT
}
- return p.Request
+ return p.Params
}
-func (p *BackendServicePublishClusterStateArgs) SetRequest(val *agentservice.TAgentPublishRequest) {
- p.Request = val
+func (p *BackendServiceOpenScannerArgs) SetParams(val *dorisexternalservice.TScanOpenParams) {
+ p.Params = val
}
-var fieldIDToName_BackendServicePublishClusterStateArgs = map[int16]string{
- 1: "request",
+var fieldIDToName_BackendServiceOpenScannerArgs = map[int16]string{
+ 1: "params",
}
-func (p *BackendServicePublishClusterStateArgs) IsSetRequest() bool {
- return p.Request != nil
+func (p *BackendServiceOpenScannerArgs) IsSetParams() bool {
+ return p.Params != nil
}
-func (p *BackendServicePublishClusterStateArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceOpenScannerArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -9997,17 +21770,14 @@ func (p *BackendServicePublishClusterStateArgs) Read(iprot thrift.TProtocol) (er
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -10022,7 +21792,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateArgs[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerArgs[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -10032,17 +21802,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServicePublishClusterStateArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Request = agentservice.NewTAgentPublishRequest()
- if err := p.Request.Read(iprot); err != nil {
+func (p *BackendServiceOpenScannerArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := dorisexternalservice.NewTScanOpenParams()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Params = _field
return nil
}
-func (p *BackendServicePublishClusterStateArgs) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceOpenScannerArgs) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("publish_cluster_state_args"); err != nil {
+ if err = oprot.WriteStructBegin("open_scanner_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -10050,7 +21821,6 @@ func (p *BackendServicePublishClusterStateArgs) Write(oprot thrift.TProtocol) (e
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -10069,11 +21839,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServicePublishClusterStateArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil {
+func (p *BackendServiceOpenScannerArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil {
goto WriteFieldBeginError
}
- if err := p.Request.Write(oprot); err != nil {
+ if err := p.Params.Write(oprot); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -10086,66 +21856,66 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServicePublishClusterStateArgs) String() string {
+func (p *BackendServiceOpenScannerArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServicePublishClusterStateArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceOpenScannerArgs(%+v)", *p)
+
}
-func (p *BackendServicePublishClusterStateArgs) DeepEqual(ano *BackendServicePublishClusterStateArgs) bool {
+func (p *BackendServiceOpenScannerArgs) DeepEqual(ano *BackendServiceOpenScannerArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.Request) {
+ if !p.Field1DeepEqual(ano.Params) {
return false
}
return true
}
-func (p *BackendServicePublishClusterStateArgs) Field1DeepEqual(src *agentservice.TAgentPublishRequest) bool {
+func (p *BackendServiceOpenScannerArgs) Field1DeepEqual(src *dorisexternalservice.TScanOpenParams) bool {
- if !p.Request.DeepEqual(src) {
+ if !p.Params.DeepEqual(src) {
return false
}
return true
}
-type BackendServicePublishClusterStateResult struct {
- Success *agentservice.TAgentResult_ `thrift:"success,0,optional" frugal:"0,optional,agentservice.TAgentResult_" json:"success,omitempty"`
+type BackendServiceOpenScannerResult struct {
+ Success *dorisexternalservice.TScanOpenResult_ `thrift:"success,0,optional" frugal:"0,optional,dorisexternalservice.TScanOpenResult_" json:"success,omitempty"`
}
-func NewBackendServicePublishClusterStateResult() *BackendServicePublishClusterStateResult {
- return &BackendServicePublishClusterStateResult{}
+func NewBackendServiceOpenScannerResult() *BackendServiceOpenScannerResult {
+ return &BackendServiceOpenScannerResult{}
}
-func (p *BackendServicePublishClusterStateResult) InitDefault() {
- *p = BackendServicePublishClusterStateResult{}
+func (p *BackendServiceOpenScannerResult) InitDefault() {
}
-var BackendServicePublishClusterStateResult_Success_DEFAULT *agentservice.TAgentResult_
+var BackendServiceOpenScannerResult_Success_DEFAULT *dorisexternalservice.TScanOpenResult_
-func (p *BackendServicePublishClusterStateResult) GetSuccess() (v *agentservice.TAgentResult_) {
+func (p *BackendServiceOpenScannerResult) GetSuccess() (v *dorisexternalservice.TScanOpenResult_) {
if !p.IsSetSuccess() {
- return BackendServicePublishClusterStateResult_Success_DEFAULT
+ return BackendServiceOpenScannerResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServicePublishClusterStateResult) SetSuccess(x interface{}) {
- p.Success = x.(*agentservice.TAgentResult_)
+func (p *BackendServiceOpenScannerResult) SetSuccess(x interface{}) {
+ p.Success = x.(*dorisexternalservice.TScanOpenResult_)
}
-var fieldIDToName_BackendServicePublishClusterStateResult = map[int16]string{
+var fieldIDToName_BackendServiceOpenScannerResult = map[int16]string{
0: "success",
}
-func (p *BackendServicePublishClusterStateResult) IsSetSuccess() bool {
+func (p *BackendServiceOpenScannerResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServicePublishClusterStateResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceOpenScannerResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -10169,17 +21939,14 @@ func (p *BackendServicePublishClusterStateResult) Read(iprot thrift.TProtocol) (
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -10194,7 +21961,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -10204,17 +21971,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServicePublishClusterStateResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = agentservice.NewTAgentResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceOpenScannerResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := dorisexternalservice.NewTScanOpenResult_()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServicePublishClusterStateResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceOpenScannerResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("publish_cluster_state_result"); err != nil {
+ if err = oprot.WriteStructBegin("open_scanner_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -10222,7 +21990,6 @@ func (p *BackendServicePublishClusterStateResult) Write(oprot thrift.TProtocol)
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -10241,7 +22008,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServicePublishClusterStateResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceOpenScannerResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -10260,14 +22027,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServicePublishClusterStateResult) String() string {
+func (p *BackendServiceOpenScannerResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServicePublishClusterStateResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceOpenScannerResult(%+v)", *p)
+
}
-func (p *BackendServicePublishClusterStateResult) DeepEqual(ano *BackendServicePublishClusterStateResult) bool {
+func (p *BackendServiceOpenScannerResult) DeepEqual(ano *BackendServiceOpenScannerResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -10279,7 +22047,7 @@ func (p *BackendServicePublishClusterStateResult) DeepEqual(ano *BackendServiceP
return true
}
-func (p *BackendServicePublishClusterStateResult) Field0DeepEqual(src *agentservice.TAgentResult_) bool {
+func (p *BackendServiceOpenScannerResult) Field0DeepEqual(src *dorisexternalservice.TScanOpenResult_) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -10287,39 +22055,38 @@ func (p *BackendServicePublishClusterStateResult) Field0DeepEqual(src *agentserv
return true
}
-type BackendServiceSubmitExportTaskArgs struct {
- Request *TExportTaskRequest `thrift:"request,1" frugal:"1,default,TExportTaskRequest" json:"request"`
+type BackendServiceGetNextArgs struct {
+ Params *dorisexternalservice.TScanNextBatchParams `thrift:"params,1" frugal:"1,default,dorisexternalservice.TScanNextBatchParams" json:"params"`
}
-func NewBackendServiceSubmitExportTaskArgs() *BackendServiceSubmitExportTaskArgs {
- return &BackendServiceSubmitExportTaskArgs{}
+func NewBackendServiceGetNextArgs() *BackendServiceGetNextArgs {
+ return &BackendServiceGetNextArgs{}
}
-func (p *BackendServiceSubmitExportTaskArgs) InitDefault() {
- *p = BackendServiceSubmitExportTaskArgs{}
+func (p *BackendServiceGetNextArgs) InitDefault() {
}
-var BackendServiceSubmitExportTaskArgs_Request_DEFAULT *TExportTaskRequest
+var BackendServiceGetNextArgs_Params_DEFAULT *dorisexternalservice.TScanNextBatchParams
-func (p *BackendServiceSubmitExportTaskArgs) GetRequest() (v *TExportTaskRequest) {
- if !p.IsSetRequest() {
- return BackendServiceSubmitExportTaskArgs_Request_DEFAULT
+func (p *BackendServiceGetNextArgs) GetParams() (v *dorisexternalservice.TScanNextBatchParams) {
+ if !p.IsSetParams() {
+ return BackendServiceGetNextArgs_Params_DEFAULT
}
- return p.Request
+ return p.Params
}
-func (p *BackendServiceSubmitExportTaskArgs) SetRequest(val *TExportTaskRequest) {
- p.Request = val
+func (p *BackendServiceGetNextArgs) SetParams(val *dorisexternalservice.TScanNextBatchParams) {
+ p.Params = val
}
-var fieldIDToName_BackendServiceSubmitExportTaskArgs = map[int16]string{
- 1: "request",
+var fieldIDToName_BackendServiceGetNextArgs = map[int16]string{
+ 1: "params",
}
-func (p *BackendServiceSubmitExportTaskArgs) IsSetRequest() bool {
- return p.Request != nil
+func (p *BackendServiceGetNextArgs) IsSetParams() bool {
+ return p.Params != nil
}
-func (p *BackendServiceSubmitExportTaskArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetNextArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -10343,17 +22110,14 @@ func (p *BackendServiceSubmitExportTaskArgs) Read(iprot thrift.TProtocol) (err e
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -10368,7 +22132,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskArgs[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextArgs[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -10378,17 +22142,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceSubmitExportTaskArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Request = NewTExportTaskRequest()
- if err := p.Request.Read(iprot); err != nil {
+func (p *BackendServiceGetNextArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := dorisexternalservice.NewTScanNextBatchParams()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Params = _field
return nil
}
-func (p *BackendServiceSubmitExportTaskArgs) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetNextArgs) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("submit_export_task_args"); err != nil {
+ if err = oprot.WriteStructBegin("get_next_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -10396,7 +22161,6 @@ func (p *BackendServiceSubmitExportTaskArgs) Write(oprot thrift.TProtocol) (err
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -10415,11 +22179,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceSubmitExportTaskArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil {
+func (p *BackendServiceGetNextArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil {
goto WriteFieldBeginError
}
- if err := p.Request.Write(oprot); err != nil {
+ if err := p.Params.Write(oprot); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -10432,66 +22196,66 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServiceSubmitExportTaskArgs) String() string {
+func (p *BackendServiceGetNextArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceSubmitExportTaskArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetNextArgs(%+v)", *p)
+
}
-func (p *BackendServiceSubmitExportTaskArgs) DeepEqual(ano *BackendServiceSubmitExportTaskArgs) bool {
+func (p *BackendServiceGetNextArgs) DeepEqual(ano *BackendServiceGetNextArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.Request) {
+ if !p.Field1DeepEqual(ano.Params) {
return false
}
return true
}
-func (p *BackendServiceSubmitExportTaskArgs) Field1DeepEqual(src *TExportTaskRequest) bool {
+func (p *BackendServiceGetNextArgs) Field1DeepEqual(src *dorisexternalservice.TScanNextBatchParams) bool {
- if !p.Request.DeepEqual(src) {
+ if !p.Params.DeepEqual(src) {
return false
}
return true
}
-type BackendServiceSubmitExportTaskResult struct {
- Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"`
+type BackendServiceGetNextResult struct {
+ Success *dorisexternalservice.TScanBatchResult_ `thrift:"success,0,optional" frugal:"0,optional,dorisexternalservice.TScanBatchResult_" json:"success,omitempty"`
}
-func NewBackendServiceSubmitExportTaskResult() *BackendServiceSubmitExportTaskResult {
- return &BackendServiceSubmitExportTaskResult{}
+func NewBackendServiceGetNextResult() *BackendServiceGetNextResult {
+ return &BackendServiceGetNextResult{}
}
-func (p *BackendServiceSubmitExportTaskResult) InitDefault() {
- *p = BackendServiceSubmitExportTaskResult{}
+func (p *BackendServiceGetNextResult) InitDefault() {
}
-var BackendServiceSubmitExportTaskResult_Success_DEFAULT *status.TStatus
+var BackendServiceGetNextResult_Success_DEFAULT *dorisexternalservice.TScanBatchResult_
-func (p *BackendServiceSubmitExportTaskResult) GetSuccess() (v *status.TStatus) {
+func (p *BackendServiceGetNextResult) GetSuccess() (v *dorisexternalservice.TScanBatchResult_) {
if !p.IsSetSuccess() {
- return BackendServiceSubmitExportTaskResult_Success_DEFAULT
+ return BackendServiceGetNextResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceSubmitExportTaskResult) SetSuccess(x interface{}) {
- p.Success = x.(*status.TStatus)
+func (p *BackendServiceGetNextResult) SetSuccess(x interface{}) {
+ p.Success = x.(*dorisexternalservice.TScanBatchResult_)
}
-var fieldIDToName_BackendServiceSubmitExportTaskResult = map[int16]string{
+var fieldIDToName_BackendServiceGetNextResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceSubmitExportTaskResult) IsSetSuccess() bool {
+func (p *BackendServiceGetNextResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceSubmitExportTaskResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetNextResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -10515,17 +22279,14 @@ func (p *BackendServiceSubmitExportTaskResult) Read(iprot thrift.TProtocol) (err
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -10540,7 +22301,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -10550,17 +22311,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceSubmitExportTaskResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = status.NewTStatus()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceGetNextResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := dorisexternalservice.NewTScanBatchResult_()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceSubmitExportTaskResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetNextResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("submit_export_task_result"); err != nil {
+ if err = oprot.WriteStructBegin("get_next_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -10568,7 +22330,6 @@ func (p *BackendServiceSubmitExportTaskResult) Write(oprot thrift.TProtocol) (er
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -10587,7 +22348,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceSubmitExportTaskResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetNextResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -10606,14 +22367,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceSubmitExportTaskResult) String() string {
+func (p *BackendServiceGetNextResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceSubmitExportTaskResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetNextResult(%+v)", *p)
+
}
-func (p *BackendServiceSubmitExportTaskResult) DeepEqual(ano *BackendServiceSubmitExportTaskResult) bool {
+func (p *BackendServiceGetNextResult) DeepEqual(ano *BackendServiceGetNextResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -10625,7 +22387,7 @@ func (p *BackendServiceSubmitExportTaskResult) DeepEqual(ano *BackendServiceSubm
return true
}
-func (p *BackendServiceSubmitExportTaskResult) Field0DeepEqual(src *status.TStatus) bool {
+func (p *BackendServiceGetNextResult) Field0DeepEqual(src *dorisexternalservice.TScanBatchResult_) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -10633,39 +22395,38 @@ func (p *BackendServiceSubmitExportTaskResult) Field0DeepEqual(src *status.TStat
return true
}
-type BackendServiceGetExportStatusArgs struct {
- TaskId *types.TUniqueId `thrift:"task_id,1" frugal:"1,default,types.TUniqueId" json:"task_id"`
+type BackendServiceCloseScannerArgs struct {
+ Params *dorisexternalservice.TScanCloseParams `thrift:"params,1" frugal:"1,default,dorisexternalservice.TScanCloseParams" json:"params"`
}
-func NewBackendServiceGetExportStatusArgs() *BackendServiceGetExportStatusArgs {
- return &BackendServiceGetExportStatusArgs{}
+func NewBackendServiceCloseScannerArgs() *BackendServiceCloseScannerArgs {
+ return &BackendServiceCloseScannerArgs{}
}
-func (p *BackendServiceGetExportStatusArgs) InitDefault() {
- *p = BackendServiceGetExportStatusArgs{}
+func (p *BackendServiceCloseScannerArgs) InitDefault() {
}
-var BackendServiceGetExportStatusArgs_TaskId_DEFAULT *types.TUniqueId
+var BackendServiceCloseScannerArgs_Params_DEFAULT *dorisexternalservice.TScanCloseParams
-func (p *BackendServiceGetExportStatusArgs) GetTaskId() (v *types.TUniqueId) {
- if !p.IsSetTaskId() {
- return BackendServiceGetExportStatusArgs_TaskId_DEFAULT
+func (p *BackendServiceCloseScannerArgs) GetParams() (v *dorisexternalservice.TScanCloseParams) {
+ if !p.IsSetParams() {
+ return BackendServiceCloseScannerArgs_Params_DEFAULT
}
- return p.TaskId
+ return p.Params
}
-func (p *BackendServiceGetExportStatusArgs) SetTaskId(val *types.TUniqueId) {
- p.TaskId = val
+func (p *BackendServiceCloseScannerArgs) SetParams(val *dorisexternalservice.TScanCloseParams) {
+ p.Params = val
}
-var fieldIDToName_BackendServiceGetExportStatusArgs = map[int16]string{
- 1: "task_id",
+var fieldIDToName_BackendServiceCloseScannerArgs = map[int16]string{
+ 1: "params",
}
-func (p *BackendServiceGetExportStatusArgs) IsSetTaskId() bool {
- return p.TaskId != nil
+func (p *BackendServiceCloseScannerArgs) IsSetParams() bool {
+ return p.Params != nil
}
-func (p *BackendServiceGetExportStatusArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceCloseScannerArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -10689,17 +22450,14 @@ func (p *BackendServiceGetExportStatusArgs) Read(iprot thrift.TProtocol) (err er
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -10714,7 +22472,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusArgs[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerArgs[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -10724,17 +22482,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetExportStatusArgs) ReadField1(iprot thrift.TProtocol) error {
- p.TaskId = types.NewTUniqueId()
- if err := p.TaskId.Read(iprot); err != nil {
+func (p *BackendServiceCloseScannerArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := dorisexternalservice.NewTScanCloseParams()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Params = _field
return nil
}
-func (p *BackendServiceGetExportStatusArgs) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceCloseScannerArgs) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("get_export_status_args"); err != nil {
+ if err = oprot.WriteStructBegin("close_scanner_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -10742,7 +22501,6 @@ func (p *BackendServiceGetExportStatusArgs) Write(oprot thrift.TProtocol) (err e
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -10761,11 +22519,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceGetExportStatusArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("task_id", thrift.STRUCT, 1); err != nil {
+func (p *BackendServiceCloseScannerArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil {
goto WriteFieldBeginError
}
- if err := p.TaskId.Write(oprot); err != nil {
+ if err := p.Params.Write(oprot); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -10778,66 +22536,66 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServiceGetExportStatusArgs) String() string {
+func (p *BackendServiceCloseScannerArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceGetExportStatusArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceCloseScannerArgs(%+v)", *p)
+
}
-func (p *BackendServiceGetExportStatusArgs) DeepEqual(ano *BackendServiceGetExportStatusArgs) bool {
+func (p *BackendServiceCloseScannerArgs) DeepEqual(ano *BackendServiceCloseScannerArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.TaskId) {
+ if !p.Field1DeepEqual(ano.Params) {
return false
}
return true
}
-func (p *BackendServiceGetExportStatusArgs) Field1DeepEqual(src *types.TUniqueId) bool {
+func (p *BackendServiceCloseScannerArgs) Field1DeepEqual(src *dorisexternalservice.TScanCloseParams) bool {
- if !p.TaskId.DeepEqual(src) {
+ if !p.Params.DeepEqual(src) {
return false
}
return true
}
-type BackendServiceGetExportStatusResult struct {
- Success *palointernalservice.TExportStatusResult_ `thrift:"success,0,optional" frugal:"0,optional,palointernalservice.TExportStatusResult_" json:"success,omitempty"`
+type BackendServiceCloseScannerResult struct {
+ Success *dorisexternalservice.TScanCloseResult_ `thrift:"success,0,optional" frugal:"0,optional,dorisexternalservice.TScanCloseResult_" json:"success,omitempty"`
}
-func NewBackendServiceGetExportStatusResult() *BackendServiceGetExportStatusResult {
- return &BackendServiceGetExportStatusResult{}
+func NewBackendServiceCloseScannerResult() *BackendServiceCloseScannerResult {
+ return &BackendServiceCloseScannerResult{}
}
-func (p *BackendServiceGetExportStatusResult) InitDefault() {
- *p = BackendServiceGetExportStatusResult{}
+func (p *BackendServiceCloseScannerResult) InitDefault() {
}
-var BackendServiceGetExportStatusResult_Success_DEFAULT *palointernalservice.TExportStatusResult_
+var BackendServiceCloseScannerResult_Success_DEFAULT *dorisexternalservice.TScanCloseResult_
-func (p *BackendServiceGetExportStatusResult) GetSuccess() (v *palointernalservice.TExportStatusResult_) {
+func (p *BackendServiceCloseScannerResult) GetSuccess() (v *dorisexternalservice.TScanCloseResult_) {
if !p.IsSetSuccess() {
- return BackendServiceGetExportStatusResult_Success_DEFAULT
+ return BackendServiceCloseScannerResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceGetExportStatusResult) SetSuccess(x interface{}) {
- p.Success = x.(*palointernalservice.TExportStatusResult_)
+func (p *BackendServiceCloseScannerResult) SetSuccess(x interface{}) {
+ p.Success = x.(*dorisexternalservice.TScanCloseResult_)
}
-var fieldIDToName_BackendServiceGetExportStatusResult = map[int16]string{
+var fieldIDToName_BackendServiceCloseScannerResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceGetExportStatusResult) IsSetSuccess() bool {
+func (p *BackendServiceCloseScannerResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceGetExportStatusResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceCloseScannerResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -10861,17 +22619,14 @@ func (p *BackendServiceGetExportStatusResult) Read(iprot thrift.TProtocol) (err
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -10886,7 +22641,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -10896,17 +22651,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetExportStatusResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = palointernalservice.NewTExportStatusResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceCloseScannerResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := dorisexternalservice.NewTScanCloseResult_()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceGetExportStatusResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceCloseScannerResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("get_export_status_result"); err != nil {
+ if err = oprot.WriteStructBegin("close_scanner_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -10914,7 +22670,6 @@ func (p *BackendServiceGetExportStatusResult) Write(oprot thrift.TProtocol) (err
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -10933,7 +22688,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceGetExportStatusResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceCloseScannerResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -10952,14 +22707,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceGetExportStatusResult) String() string {
+func (p *BackendServiceCloseScannerResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceGetExportStatusResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceCloseScannerResult(%+v)", *p)
+
}
-func (p *BackendServiceGetExportStatusResult) DeepEqual(ano *BackendServiceGetExportStatusResult) bool {
+func (p *BackendServiceCloseScannerResult) DeepEqual(ano *BackendServiceCloseScannerResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -10971,7 +22727,7 @@ func (p *BackendServiceGetExportStatusResult) DeepEqual(ano *BackendServiceGetEx
return true
}
-func (p *BackendServiceGetExportStatusResult) Field0DeepEqual(src *palointernalservice.TExportStatusResult_) bool {
+func (p *BackendServiceCloseScannerResult) Field0DeepEqual(src *dorisexternalservice.TScanCloseResult_) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -10979,39 +22735,29 @@ func (p *BackendServiceGetExportStatusResult) Field0DeepEqual(src *palointernals
return true
}
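
A recurring change in this regenerated file, visible in the CloseScanner hunks above and repeated for every struct below, is that the reader loop folds the old nested `else { if err = iprot.Skip(...) }` into a single `else if`, with no behavioural change: a known field id with the expected wire type is decoded, everything else is skipped. The sketch below shows that loop shape in isolation; `fieldReader`, `ttype` and the constants are stand-ins defined here purely for illustration, not the real `thrift.TProtocol` API.

```go
package sketch

import "fmt"

// ttype and the constants below are illustrative stand-ins for
// thrift.TType / thrift.STOP / thrift.I64, not the real definitions.
type ttype int8

const (
	tSTOP ttype = 0
	tI64  ttype = 10
)

// fieldReader is a hand-written stand-in for the small subset of
// thrift.TProtocol that the generated read loop touches.
type fieldReader interface {
	ReadFieldBegin() (name string, typeID ttype, id int16, err error)
	ReadFieldEnd() error
	Skip(typeID ttype) error
	ReadI64() (int64, error)
}

// readLoop shows the loop shape the generator now emits: a known field id
// with the expected wire type is decoded, anything else is skipped via a
// single `else if` instead of the older nested `else { if ... }` block.
func readLoop(iprot fieldReader, dst *int64) error {
	for {
		_, typeID, id, err := iprot.ReadFieldBegin()
		if err != nil {
			return err
		}
		if typeID == tSTOP {
			return nil
		}
		switch id {
		case 1:
			if typeID == tI64 {
				v, rerr := iprot.ReadI64()
				if rerr != nil {
					return rerr
				}
				*dst = v
			} else if serr := iprot.Skip(typeID); serr != nil {
				return fmt.Errorf("skip field %d: %w", id, serr)
			}
		default:
			if serr := iprot.Skip(typeID); serr != nil {
				return fmt.Errorf("skip field %d: %w", id, serr)
			}
		}
		if err := iprot.ReadFieldEnd(); err != nil {
			return err
		}
	}
}
```
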
-type BackendServiceEraseExportTaskArgs struct {
- TaskId *types.TUniqueId `thrift:"task_id,1" frugal:"1,default,types.TUniqueId" json:"task_id"`
+type BackendServiceGetStreamLoadRecordArgs struct {
+ LastStreamRecordTime int64 `thrift:"last_stream_record_time,1" frugal:"1,default,i64" json:"last_stream_record_time"`
}
-func NewBackendServiceEraseExportTaskArgs() *BackendServiceEraseExportTaskArgs {
- return &BackendServiceEraseExportTaskArgs{}
+func NewBackendServiceGetStreamLoadRecordArgs() *BackendServiceGetStreamLoadRecordArgs {
+ return &BackendServiceGetStreamLoadRecordArgs{}
}
-func (p *BackendServiceEraseExportTaskArgs) InitDefault() {
- *p = BackendServiceEraseExportTaskArgs{}
+func (p *BackendServiceGetStreamLoadRecordArgs) InitDefault() {
}
-var BackendServiceEraseExportTaskArgs_TaskId_DEFAULT *types.TUniqueId
-
-func (p *BackendServiceEraseExportTaskArgs) GetTaskId() (v *types.TUniqueId) {
- if !p.IsSetTaskId() {
- return BackendServiceEraseExportTaskArgs_TaskId_DEFAULT
- }
- return p.TaskId
-}
-func (p *BackendServiceEraseExportTaskArgs) SetTaskId(val *types.TUniqueId) {
- p.TaskId = val
+func (p *BackendServiceGetStreamLoadRecordArgs) GetLastStreamRecordTime() (v int64) {
+ return p.LastStreamRecordTime
}
-
-var fieldIDToName_BackendServiceEraseExportTaskArgs = map[int16]string{
- 1: "task_id",
+func (p *BackendServiceGetStreamLoadRecordArgs) SetLastStreamRecordTime(val int64) {
+ p.LastStreamRecordTime = val
}
-func (p *BackendServiceEraseExportTaskArgs) IsSetTaskId() bool {
- return p.TaskId != nil
+var fieldIDToName_BackendServiceGetStreamLoadRecordArgs = map[int16]string{
+ 1: "last_stream_record_time",
}
-func (p *BackendServiceEraseExportTaskArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetStreamLoadRecordArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -11031,21 +22777,18 @@ func (p *BackendServiceEraseExportTaskArgs) Read(iprot thrift.TProtocol) (err er
switch fieldId {
case 1:
- if fieldTypeId == thrift.STRUCT {
+ if fieldTypeId == thrift.I64 {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -11060,7 +22803,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskArgs[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordArgs[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -11070,17 +22813,21 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceEraseExportTaskArgs) ReadField1(iprot thrift.TProtocol) error {
- p.TaskId = types.NewTUniqueId()
- if err := p.TaskId.Read(iprot); err != nil {
+func (p *BackendServiceGetStreamLoadRecordArgs) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field int64
+ if v, err := iprot.ReadI64(); err != nil {
return err
+ } else {
+ _field = v
}
+ p.LastStreamRecordTime = _field
return nil
}
-func (p *BackendServiceEraseExportTaskArgs) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetStreamLoadRecordArgs) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("erase_export_task_args"); err != nil {
+ if err = oprot.WriteStructBegin("get_stream_load_record_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -11088,7 +22835,6 @@ func (p *BackendServiceEraseExportTaskArgs) Write(oprot thrift.TProtocol) (err e
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -11107,11 +22853,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceEraseExportTaskArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("task_id", thrift.STRUCT, 1); err != nil {
+func (p *BackendServiceGetStreamLoadRecordArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("last_stream_record_time", thrift.I64, 1); err != nil {
goto WriteFieldBeginError
}
- if err := p.TaskId.Write(oprot); err != nil {
+ if err := oprot.WriteI64(p.LastStreamRecordTime); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -11124,66 +22870,66 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServiceEraseExportTaskArgs) String() string {
+func (p *BackendServiceGetStreamLoadRecordArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceEraseExportTaskArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetStreamLoadRecordArgs(%+v)", *p)
+
}
-func (p *BackendServiceEraseExportTaskArgs) DeepEqual(ano *BackendServiceEraseExportTaskArgs) bool {
+func (p *BackendServiceGetStreamLoadRecordArgs) DeepEqual(ano *BackendServiceGetStreamLoadRecordArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.TaskId) {
+ if !p.Field1DeepEqual(ano.LastStreamRecordTime) {
return false
}
return true
}
-func (p *BackendServiceEraseExportTaskArgs) Field1DeepEqual(src *types.TUniqueId) bool {
+func (p *BackendServiceGetStreamLoadRecordArgs) Field1DeepEqual(src int64) bool {
- if !p.TaskId.DeepEqual(src) {
+ if p.LastStreamRecordTime != src {
return false
}
return true
}
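
The regenerated `ReadFieldN` methods also stop writing straight into the struct member: they decode into a local `_field` first and assign only after the read succeeds, as `ReadField1` for `last_stream_record_time` does above, so a failed read leaves the destination untouched. A minimal sketch of that shape, with `readI64` standing in for `iprot.ReadI64`:

```go
package sketch

// record stands in for a generated args struct with a single i64 field.
type record struct {
	LastStreamRecordTime int64
}

// readField1 mirrors the decode-then-assign shape emitted by the new
// generator, including the if/else form used in the generated code.
func (r *record) readField1(readI64 func() (int64, error)) error {
	var _field int64
	if v, err := readI64(); err != nil {
		return err
	} else {
		_field = v
	}
	r.LastStreamRecordTime = _field
	return nil
}
```
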
-type BackendServiceEraseExportTaskResult struct {
- Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"`
+type BackendServiceGetStreamLoadRecordResult struct {
+ Success *TStreamLoadRecordResult_ `thrift:"success,0,optional" frugal:"0,optional,TStreamLoadRecordResult_" json:"success,omitempty"`
}
-func NewBackendServiceEraseExportTaskResult() *BackendServiceEraseExportTaskResult {
- return &BackendServiceEraseExportTaskResult{}
+func NewBackendServiceGetStreamLoadRecordResult() *BackendServiceGetStreamLoadRecordResult {
+ return &BackendServiceGetStreamLoadRecordResult{}
}
-func (p *BackendServiceEraseExportTaskResult) InitDefault() {
- *p = BackendServiceEraseExportTaskResult{}
+func (p *BackendServiceGetStreamLoadRecordResult) InitDefault() {
}
-var BackendServiceEraseExportTaskResult_Success_DEFAULT *status.TStatus
+var BackendServiceGetStreamLoadRecordResult_Success_DEFAULT *TStreamLoadRecordResult_
-func (p *BackendServiceEraseExportTaskResult) GetSuccess() (v *status.TStatus) {
+func (p *BackendServiceGetStreamLoadRecordResult) GetSuccess() (v *TStreamLoadRecordResult_) {
if !p.IsSetSuccess() {
- return BackendServiceEraseExportTaskResult_Success_DEFAULT
+ return BackendServiceGetStreamLoadRecordResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceEraseExportTaskResult) SetSuccess(x interface{}) {
- p.Success = x.(*status.TStatus)
+func (p *BackendServiceGetStreamLoadRecordResult) SetSuccess(x interface{}) {
+ p.Success = x.(*TStreamLoadRecordResult_)
}
-var fieldIDToName_BackendServiceEraseExportTaskResult = map[int16]string{
+var fieldIDToName_BackendServiceGetStreamLoadRecordResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceEraseExportTaskResult) IsSetSuccess() bool {
+func (p *BackendServiceGetStreamLoadRecordResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceEraseExportTaskResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetStreamLoadRecordResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -11207,17 +22953,14 @@ func (p *BackendServiceEraseExportTaskResult) Read(iprot thrift.TProtocol) (err
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -11232,7 +22975,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -11242,17 +22985,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceEraseExportTaskResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = status.NewTStatus()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceGetStreamLoadRecordResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := NewTStreamLoadRecordResult_()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceEraseExportTaskResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetStreamLoadRecordResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("erase_export_task_result"); err != nil {
+ if err = oprot.WriteStructBegin("get_stream_load_record_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -11260,7 +23004,6 @@ func (p *BackendServiceEraseExportTaskResult) Write(oprot thrift.TProtocol) (err
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -11279,7 +23022,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceEraseExportTaskResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetStreamLoadRecordResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -11298,14 +23041,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceEraseExportTaskResult) String() string {
+func (p *BackendServiceGetStreamLoadRecordResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceEraseExportTaskResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetStreamLoadRecordResult(%+v)", *p)
+
}
-func (p *BackendServiceEraseExportTaskResult) DeepEqual(ano *BackendServiceEraseExportTaskResult) bool {
+func (p *BackendServiceGetStreamLoadRecordResult) DeepEqual(ano *BackendServiceGetStreamLoadRecordResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -11317,7 +23061,7 @@ func (p *BackendServiceEraseExportTaskResult) DeepEqual(ano *BackendServiceErase
return true
}
-func (p *BackendServiceEraseExportTaskResult) Field0DeepEqual(src *status.TStatus) bool {
+func (p *BackendServiceGetStreamLoadRecordResult) Field0DeepEqual(src *TStreamLoadRecordResult_) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -11325,20 +23069,19 @@ func (p *BackendServiceEraseExportTaskResult) Field0DeepEqual(src *status.TStatu
return true
}
-type BackendServiceGetTabletStatArgs struct {
+type BackendServiceCheckStorageFormatArgs struct {
}
-func NewBackendServiceGetTabletStatArgs() *BackendServiceGetTabletStatArgs {
- return &BackendServiceGetTabletStatArgs{}
+func NewBackendServiceCheckStorageFormatArgs() *BackendServiceCheckStorageFormatArgs {
+ return &BackendServiceCheckStorageFormatArgs{}
}
-func (p *BackendServiceGetTabletStatArgs) InitDefault() {
- *p = BackendServiceGetTabletStatArgs{}
+func (p *BackendServiceCheckStorageFormatArgs) InitDefault() {
}
-var fieldIDToName_BackendServiceGetTabletStatArgs = map[int16]string{}
+var fieldIDToName_BackendServiceCheckStorageFormatArgs = map[int16]string{}
-func (p *BackendServiceGetTabletStatArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceCheckStorageFormatArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -11358,7 +23101,6 @@ func (p *BackendServiceGetTabletStatArgs) Read(iprot thrift.TProtocol) (err erro
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldTypeError
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -11381,12 +23123,11 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetTabletStatArgs) Write(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteStructBegin("get_tablet_stat_args"); err != nil {
+func (p *BackendServiceCheckStorageFormatArgs) Write(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteStructBegin("check_storage_format_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -11403,14 +23144,15 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceGetTabletStatArgs) String() string {
+func (p *BackendServiceCheckStorageFormatArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceGetTabletStatArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceCheckStorageFormatArgs(%+v)", *p)
+
}
-func (p *BackendServiceGetTabletStatArgs) DeepEqual(ano *BackendServiceGetTabletStatArgs) bool {
+func (p *BackendServiceCheckStorageFormatArgs) DeepEqual(ano *BackendServiceCheckStorageFormatArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -11419,39 +23161,38 @@ func (p *BackendServiceGetTabletStatArgs) DeepEqual(ano *BackendServiceGetTablet
return true
}
-type BackendServiceGetTabletStatResult struct {
- Success *TTabletStatResult_ `thrift:"success,0,optional" frugal:"0,optional,TTabletStatResult_" json:"success,omitempty"`
+type BackendServiceCheckStorageFormatResult struct {
+ Success *TCheckStorageFormatResult_ `thrift:"success,0,optional" frugal:"0,optional,TCheckStorageFormatResult_" json:"success,omitempty"`
}
-func NewBackendServiceGetTabletStatResult() *BackendServiceGetTabletStatResult {
- return &BackendServiceGetTabletStatResult{}
+func NewBackendServiceCheckStorageFormatResult() *BackendServiceCheckStorageFormatResult {
+ return &BackendServiceCheckStorageFormatResult{}
}
-func (p *BackendServiceGetTabletStatResult) InitDefault() {
- *p = BackendServiceGetTabletStatResult{}
+func (p *BackendServiceCheckStorageFormatResult) InitDefault() {
}
-var BackendServiceGetTabletStatResult_Success_DEFAULT *TTabletStatResult_
+var BackendServiceCheckStorageFormatResult_Success_DEFAULT *TCheckStorageFormatResult_
-func (p *BackendServiceGetTabletStatResult) GetSuccess() (v *TTabletStatResult_) {
+func (p *BackendServiceCheckStorageFormatResult) GetSuccess() (v *TCheckStorageFormatResult_) {
if !p.IsSetSuccess() {
- return BackendServiceGetTabletStatResult_Success_DEFAULT
+ return BackendServiceCheckStorageFormatResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceGetTabletStatResult) SetSuccess(x interface{}) {
- p.Success = x.(*TTabletStatResult_)
+func (p *BackendServiceCheckStorageFormatResult) SetSuccess(x interface{}) {
+ p.Success = x.(*TCheckStorageFormatResult_)
}
-var fieldIDToName_BackendServiceGetTabletStatResult = map[int16]string{
+var fieldIDToName_BackendServiceCheckStorageFormatResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceGetTabletStatResult) IsSetSuccess() bool {
+func (p *BackendServiceCheckStorageFormatResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceGetTabletStatResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceCheckStorageFormatResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -11475,17 +23216,14 @@ func (p *BackendServiceGetTabletStatResult) Read(iprot thrift.TProtocol) (err er
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -11500,7 +23238,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTabletStatResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckStorageFormatResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -11510,17 +23248,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetTabletStatResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = NewTTabletStatResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceCheckStorageFormatResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := NewTCheckStorageFormatResult_()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceGetTabletStatResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceCheckStorageFormatResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("get_tablet_stat_result"); err != nil {
+ if err = oprot.WriteStructBegin("check_storage_format_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -11528,7 +23267,6 @@ func (p *BackendServiceGetTabletStatResult) Write(oprot thrift.TProtocol) (err e
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -11547,7 +23285,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceGetTabletStatResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceCheckStorageFormatResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -11566,14 +23304,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceGetTabletStatResult) String() string {
+func (p *BackendServiceCheckStorageFormatResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceGetTabletStatResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceCheckStorageFormatResult(%+v)", *p)
+
}
-func (p *BackendServiceGetTabletStatResult) DeepEqual(ano *BackendServiceGetTabletStatResult) bool {
+func (p *BackendServiceCheckStorageFormatResult) DeepEqual(ano *BackendServiceCheckStorageFormatResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -11585,7 +23324,7 @@ func (p *BackendServiceGetTabletStatResult) DeepEqual(ano *BackendServiceGetTabl
return true
}
-func (p *BackendServiceGetTabletStatResult) Field0DeepEqual(src *TTabletStatResult_) bool {
+func (p *BackendServiceCheckStorageFormatResult) Field0DeepEqual(src *TCheckStorageFormatResult_) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -11593,20 +23332,38 @@ func (p *BackendServiceGetTabletStatResult) Field0DeepEqual(src *TTabletStatResu
return true
}
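
Every `*Result` wrapper in this file models the optional `success` field (thrift id 0) the same way, as the CheckStorageFormat hunks above show: a pointer member, `IsSetSuccess` meaning non-nil, and a `writeField0` that emits nothing when the field is unset. A stripped-down sketch of that convention; `response` and the `emit` callback are illustrative stand-ins rather than the real payload type and `WriteFieldBegin`/`WriteFieldEnd` calls.

```go
package sketch

// response stands in for a generated payload type such as
// TCheckStorageFormatResult_.
type response struct{ OK bool }

// result mirrors the generated *ServiceXxxResult wrappers: the optional
// `success` field (thrift id 0) is a pointer, IsSetSuccess means non-nil,
// and the write path skips the field entirely when it is unset.
type result struct {
	Success *response
}

func (p *result) IsSetSuccess() bool { return p.Success != nil }

// writeField0 sketches the guarded write; emit stands in for the
// WriteFieldBegin / payload Write / WriteFieldEnd sequence.
func (p *result) writeField0(emit func(name string, id int16, v *response) error) error {
	if !p.IsSetSuccess() {
		return nil // unset optional: nothing is written to the wire
	}
	return emit("success", 0, p.Success)
}
```
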
-type BackendServiceGetTrashUsedCapacityArgs struct {
+type BackendServiceWarmUpCacheAsyncArgs struct {
+ Request *TWarmUpCacheAsyncRequest `thrift:"request,1" frugal:"1,default,TWarmUpCacheAsyncRequest" json:"request"`
}
-func NewBackendServiceGetTrashUsedCapacityArgs() *BackendServiceGetTrashUsedCapacityArgs {
- return &BackendServiceGetTrashUsedCapacityArgs{}
+func NewBackendServiceWarmUpCacheAsyncArgs() *BackendServiceWarmUpCacheAsyncArgs {
+ return &BackendServiceWarmUpCacheAsyncArgs{}
}
-func (p *BackendServiceGetTrashUsedCapacityArgs) InitDefault() {
- *p = BackendServiceGetTrashUsedCapacityArgs{}
+func (p *BackendServiceWarmUpCacheAsyncArgs) InitDefault() {
}
-var fieldIDToName_BackendServiceGetTrashUsedCapacityArgs = map[int16]string{}
+var BackendServiceWarmUpCacheAsyncArgs_Request_DEFAULT *TWarmUpCacheAsyncRequest
-func (p *BackendServiceGetTrashUsedCapacityArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceWarmUpCacheAsyncArgs) GetRequest() (v *TWarmUpCacheAsyncRequest) {
+ if !p.IsSetRequest() {
+ return BackendServiceWarmUpCacheAsyncArgs_Request_DEFAULT
+ }
+ return p.Request
+}
+func (p *BackendServiceWarmUpCacheAsyncArgs) SetRequest(val *TWarmUpCacheAsyncRequest) {
+ p.Request = val
+}
+
+var fieldIDToName_BackendServiceWarmUpCacheAsyncArgs = map[int16]string{
+ 1: "request",
+}
+
+func (p *BackendServiceWarmUpCacheAsyncArgs) IsSetRequest() bool {
+ return p.Request != nil
+}
+
+func (p *BackendServiceWarmUpCacheAsyncArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -11623,10 +23380,21 @@ func (p *BackendServiceGetTrashUsedCapacityArgs) Read(iprot thrift.TProtocol) (e
if fieldTypeId == thrift.STOP {
break
}
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldTypeError
- }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -11640,8 +23408,10 @@ ReadStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-SkipFieldTypeError:
- return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpCacheAsyncArgs[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
@@ -11649,12 +23419,25 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetTrashUsedCapacityArgs) Write(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteStructBegin("get_trash_used_capacity_args"); err != nil {
+func (p *BackendServiceWarmUpCacheAsyncArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := NewTWarmUpCacheAsyncRequest()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Request = _field
+ return nil
+}
+
+func (p *BackendServiceWarmUpCacheAsyncArgs) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("warm_up_cache_async_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
-
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -11665,61 +23448,91 @@ func (p *BackendServiceGetTrashUsedCapacityArgs) Write(oprot thrift.TProtocol) (
return nil
WriteStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
WriteFieldStopError:
return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceGetTrashUsedCapacityArgs) String() string {
+func (p *BackendServiceWarmUpCacheAsyncArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Request.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *BackendServiceWarmUpCacheAsyncArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceGetTrashUsedCapacityArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceWarmUpCacheAsyncArgs(%+v)", *p)
+
}
-func (p *BackendServiceGetTrashUsedCapacityArgs) DeepEqual(ano *BackendServiceGetTrashUsedCapacityArgs) bool {
+func (p *BackendServiceWarmUpCacheAsyncArgs) DeepEqual(ano *BackendServiceWarmUpCacheAsyncArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
+ if !p.Field1DeepEqual(ano.Request) {
+ return false
+ }
return true
}
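
Where an args struct gains its first field, as `warm_up_cache_async_args` does above, the generator also swaps the bare `SkipFieldTypeError` label for the richer `ReadFieldError`/`SkipFieldError` pair. All of these share the same goto/label error convention: each failure site jumps to a label that wraps the error with context via `thrift.PrependError` before returning. The sketch below isolates that pattern; `prependError` is a local stand-in for the real helper.

```go
package sketch

import "fmt"

// prependError is a hand-written stand-in for thrift.PrependError: it
// prefixes a message onto the error it wraps.
func prependError(prefix string, err error) error {
	return fmt.Errorf("%s%w", prefix, err)
}

// readSketch mirrors the goto/label error convention used by every
// generated Read/Write method in this file: a failure site jumps to a
// label that adds context (which struct, which field id) and returns.
func readSketch(readField func(id int16) error) (err error) {
	var fieldId int16 = 1
	if err = readField(fieldId); err != nil {
		goto ReadFieldError
	}
	return nil
ReadFieldError:
	return prependError(fmt.Sprintf("sketch read field %d error: ", fieldId), err)
}
```
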
-type BackendServiceGetTrashUsedCapacityResult struct {
- Success *int64 `thrift:"success,0,optional" frugal:"0,optional,i64" json:"success,omitempty"`
+func (p *BackendServiceWarmUpCacheAsyncArgs) Field1DeepEqual(src *TWarmUpCacheAsyncRequest) bool {
+
+ if !p.Request.DeepEqual(src) {
+ return false
+ }
+ return true
}
-func NewBackendServiceGetTrashUsedCapacityResult() *BackendServiceGetTrashUsedCapacityResult {
- return &BackendServiceGetTrashUsedCapacityResult{}
+type BackendServiceWarmUpCacheAsyncResult struct {
+ Success *TWarmUpCacheAsyncResponse `thrift:"success,0,optional" frugal:"0,optional,TWarmUpCacheAsyncResponse" json:"success,omitempty"`
}
-func (p *BackendServiceGetTrashUsedCapacityResult) InitDefault() {
- *p = BackendServiceGetTrashUsedCapacityResult{}
+func NewBackendServiceWarmUpCacheAsyncResult() *BackendServiceWarmUpCacheAsyncResult {
+ return &BackendServiceWarmUpCacheAsyncResult{}
}
-var BackendServiceGetTrashUsedCapacityResult_Success_DEFAULT int64
+func (p *BackendServiceWarmUpCacheAsyncResult) InitDefault() {
+}
-func (p *BackendServiceGetTrashUsedCapacityResult) GetSuccess() (v int64) {
+var BackendServiceWarmUpCacheAsyncResult_Success_DEFAULT *TWarmUpCacheAsyncResponse
+
+func (p *BackendServiceWarmUpCacheAsyncResult) GetSuccess() (v *TWarmUpCacheAsyncResponse) {
if !p.IsSetSuccess() {
- return BackendServiceGetTrashUsedCapacityResult_Success_DEFAULT
+ return BackendServiceWarmUpCacheAsyncResult_Success_DEFAULT
}
- return *p.Success
+ return p.Success
}
-func (p *BackendServiceGetTrashUsedCapacityResult) SetSuccess(x interface{}) {
- p.Success = x.(*int64)
+func (p *BackendServiceWarmUpCacheAsyncResult) SetSuccess(x interface{}) {
+ p.Success = x.(*TWarmUpCacheAsyncResponse)
}
-var fieldIDToName_BackendServiceGetTrashUsedCapacityResult = map[int16]string{
+var fieldIDToName_BackendServiceWarmUpCacheAsyncResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceGetTrashUsedCapacityResult) IsSetSuccess() bool {
+func (p *BackendServiceWarmUpCacheAsyncResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceGetTrashUsedCapacityResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceWarmUpCacheAsyncResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -11739,21 +23552,18 @@ func (p *BackendServiceGetTrashUsedCapacityResult) Read(iprot thrift.TProtocol)
switch fieldId {
case 0:
- if fieldTypeId == thrift.I64 {
+ if fieldTypeId == thrift.STRUCT {
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -11768,7 +23578,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTrashUsedCapacityResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpCacheAsyncResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -11778,18 +23588,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetTrashUsedCapacityResult) ReadField0(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
+func (p *BackendServiceWarmUpCacheAsyncResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := NewTWarmUpCacheAsyncResponse()
+ if err := _field.Read(iprot); err != nil {
return err
- } else {
- p.Success = &v
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceGetTrashUsedCapacityResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceWarmUpCacheAsyncResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("get_trash_used_capacity_result"); err != nil {
+ if err = oprot.WriteStructBegin("warm_up_cache_async_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -11797,7 +23607,6 @@ func (p *BackendServiceGetTrashUsedCapacityResult) Write(oprot thrift.TProtocol)
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -11816,12 +23625,12 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceGetTrashUsedCapacityResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceWarmUpCacheAsyncResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
- if err = oprot.WriteFieldBegin("success", thrift.I64, 0); err != nil {
+ if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteI64(*p.Success); err != nil {
+ if err := p.Success.Write(oprot); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -11835,14 +23644,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceGetTrashUsedCapacityResult) String() string {
+func (p *BackendServiceWarmUpCacheAsyncResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceGetTrashUsedCapacityResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceWarmUpCacheAsyncResult(%+v)", *p)
+
}
-func (p *BackendServiceGetTrashUsedCapacityResult) DeepEqual(ano *BackendServiceGetTrashUsedCapacityResult) bool {
+func (p *BackendServiceWarmUpCacheAsyncResult) DeepEqual(ano *BackendServiceWarmUpCacheAsyncResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -11854,33 +23664,46 @@ func (p *BackendServiceGetTrashUsedCapacityResult) DeepEqual(ano *BackendService
return true
}
-func (p *BackendServiceGetTrashUsedCapacityResult) Field0DeepEqual(src *int64) bool {
+func (p *BackendServiceWarmUpCacheAsyncResult) Field0DeepEqual(src *TWarmUpCacheAsyncResponse) bool {
- if p.Success == src {
- return true
- } else if p.Success == nil || src == nil {
- return false
- }
- if *p.Success != *src {
+ if !p.Success.DeepEqual(src) {
return false
}
return true
}
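
The `FieldNDeepEqual` helpers change shape with the field type, as the hunk above shows: the old optional scalar (`*int64`) was compared with explicit nil checks, while the new struct-typed field simply delegates to the payload's own `DeepEqual`, which itself tolerates nil receivers. Both shapes, sketched with a hypothetical `payload` type in place of the generated ones:

```go
package sketch

// payload stands in for a generated thrift struct with its own DeepEqual.
type payload struct{ N int64 }

// DeepEqual is nil-safe, matching the shape of the generated method.
func (p *payload) DeepEqual(o *payload) bool {
	if p == o {
		return true
	}
	if p == nil || o == nil {
		return false
	}
	return p.N == o.N
}

// fieldDeepEqualScalar mirrors the comparison emitted for optional scalar
// fields such as *int64: pointer identity, nil checks, then value compare.
func fieldDeepEqualScalar(a, b *int64) bool {
	if a == b {
		return true
	}
	if a == nil || b == nil {
		return false
	}
	return *a == *b
}

// fieldDeepEqualStruct mirrors the struct case: the wrapper delegates to
// the payload's own DeepEqual.
func fieldDeepEqualStruct(a, b *payload) bool {
	return a.DeepEqual(b)
}
```
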
-type BackendServiceGetDiskTrashUsedCapacityArgs struct {
+type BackendServiceCheckWarmUpCacheAsyncArgs struct {
+ Request *TCheckWarmUpCacheAsyncRequest `thrift:"request,1" frugal:"1,default,TCheckWarmUpCacheAsyncRequest" json:"request"`
}
-func NewBackendServiceGetDiskTrashUsedCapacityArgs() *BackendServiceGetDiskTrashUsedCapacityArgs {
- return &BackendServiceGetDiskTrashUsedCapacityArgs{}
+func NewBackendServiceCheckWarmUpCacheAsyncArgs() *BackendServiceCheckWarmUpCacheAsyncArgs {
+ return &BackendServiceCheckWarmUpCacheAsyncArgs{}
}
-func (p *BackendServiceGetDiskTrashUsedCapacityArgs) InitDefault() {
- *p = BackendServiceGetDiskTrashUsedCapacityArgs{}
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) InitDefault() {
}
-var fieldIDToName_BackendServiceGetDiskTrashUsedCapacityArgs = map[int16]string{}
+var BackendServiceCheckWarmUpCacheAsyncArgs_Request_DEFAULT *TCheckWarmUpCacheAsyncRequest
-func (p *BackendServiceGetDiskTrashUsedCapacityArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) GetRequest() (v *TCheckWarmUpCacheAsyncRequest) {
+ if !p.IsSetRequest() {
+ return BackendServiceCheckWarmUpCacheAsyncArgs_Request_DEFAULT
+ }
+ return p.Request
+}
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) SetRequest(val *TCheckWarmUpCacheAsyncRequest) {
+ p.Request = val
+}
+
+var fieldIDToName_BackendServiceCheckWarmUpCacheAsyncArgs = map[int16]string{
+ 1: "request",
+}
+
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) IsSetRequest() bool {
+ return p.Request != nil
+}
+
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -11897,10 +23720,21 @@ func (p *BackendServiceGetDiskTrashUsedCapacityArgs) Read(iprot thrift.TProtocol
if fieldTypeId == thrift.STOP {
break
}
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldTypeError
- }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -11914,8 +23748,10 @@ ReadStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-SkipFieldTypeError:
- return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckWarmUpCacheAsyncArgs[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
@@ -11923,12 +23759,25 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetDiskTrashUsedCapacityArgs) Write(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteStructBegin("get_disk_trash_used_capacity_args"); err != nil {
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := NewTCheckWarmUpCacheAsyncRequest()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Request = _field
+ return nil
+}
+
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("check_warm_up_cache_async_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
-
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -11939,61 +23788,91 @@ func (p *BackendServiceGetDiskTrashUsedCapacityArgs) Write(oprot thrift.TProtoco
return nil
WriteStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
WriteFieldStopError:
return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceGetDiskTrashUsedCapacityArgs) String() string {
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Request.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceGetDiskTrashUsedCapacityArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceCheckWarmUpCacheAsyncArgs(%+v)", *p)
+
}
-func (p *BackendServiceGetDiskTrashUsedCapacityArgs) DeepEqual(ano *BackendServiceGetDiskTrashUsedCapacityArgs) bool {
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) DeepEqual(ano *BackendServiceCheckWarmUpCacheAsyncArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
+ if !p.Field1DeepEqual(ano.Request) {
+ return false
+ }
return true
}
-type BackendServiceGetDiskTrashUsedCapacityResult struct {
- Success []*TDiskTrashInfo `thrift:"success,0,optional" frugal:"0,optional,list" json:"success,omitempty"`
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) Field1DeepEqual(src *TCheckWarmUpCacheAsyncRequest) bool {
+
+ if !p.Request.DeepEqual(src) {
+ return false
+ }
+ return true
}
-func NewBackendServiceGetDiskTrashUsedCapacityResult() *BackendServiceGetDiskTrashUsedCapacityResult {
- return &BackendServiceGetDiskTrashUsedCapacityResult{}
+type BackendServiceCheckWarmUpCacheAsyncResult struct {
+ Success *TCheckWarmUpCacheAsyncResponse `thrift:"success,0,optional" frugal:"0,optional,TCheckWarmUpCacheAsyncResponse" json:"success,omitempty"`
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) InitDefault() {
- *p = BackendServiceGetDiskTrashUsedCapacityResult{}
+func NewBackendServiceCheckWarmUpCacheAsyncResult() *BackendServiceCheckWarmUpCacheAsyncResult {
+ return &BackendServiceCheckWarmUpCacheAsyncResult{}
}
-var BackendServiceGetDiskTrashUsedCapacityResult_Success_DEFAULT []*TDiskTrashInfo
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) InitDefault() {
+}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) GetSuccess() (v []*TDiskTrashInfo) {
+var BackendServiceCheckWarmUpCacheAsyncResult_Success_DEFAULT *TCheckWarmUpCacheAsyncResponse
+
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) GetSuccess() (v *TCheckWarmUpCacheAsyncResponse) {
if !p.IsSetSuccess() {
- return BackendServiceGetDiskTrashUsedCapacityResult_Success_DEFAULT
+ return BackendServiceCheckWarmUpCacheAsyncResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) SetSuccess(x interface{}) {
- p.Success = x.([]*TDiskTrashInfo)
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) SetSuccess(x interface{}) {
+ p.Success = x.(*TCheckWarmUpCacheAsyncResponse)
}
-var fieldIDToName_BackendServiceGetDiskTrashUsedCapacityResult = map[int16]string{
+var fieldIDToName_BackendServiceCheckWarmUpCacheAsyncResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) IsSetSuccess() bool {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -12013,21 +23892,18 @@ func (p *BackendServiceGetDiskTrashUsedCapacityResult) Read(iprot thrift.TProtoc
switch fieldId {
case 0:
- if fieldTypeId == thrift.LIST {
+ if fieldTypeId == thrift.STRUCT {
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -12042,7 +23918,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetDiskTrashUsedCapacityResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckWarmUpCacheAsyncResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -12052,29 +23928,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) ReadField0(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return err
- }
- p.Success = make([]*TDiskTrashInfo, 0, size)
- for i := 0; i < size; i++ {
- _elem := NewTDiskTrashInfo()
- if err := _elem.Read(iprot); err != nil {
- return err
- }
-
- p.Success = append(p.Success, _elem)
- }
- if err := iprot.ReadListEnd(); err != nil {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := NewTCheckWarmUpCacheAsyncResponse()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("get_disk_trash_used_capacity_result"); err != nil {
+ if err = oprot.WriteStructBegin("check_warm_up_cache_async_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -12082,7 +23947,6 @@ func (p *BackendServiceGetDiskTrashUsedCapacityResult) Write(oprot thrift.TProto
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -12101,20 +23965,12 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
- if err = oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil {
+ if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil {
- return err
- }
- for _, v := range p.Success {
- if err := v.Write(oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
+ if err := p.Success.Write(oprot); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -12128,14 +23984,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) String() string {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceGetDiskTrashUsedCapacityResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceCheckWarmUpCacheAsyncResult(%+v)", *p)
+
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) DeepEqual(ano *BackendServiceGetDiskTrashUsedCapacityResult) bool {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) DeepEqual(ano *BackendServiceCheckWarmUpCacheAsyncResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -12147,44 +24004,46 @@ func (p *BackendServiceGetDiskTrashUsedCapacityResult) DeepEqual(ano *BackendSer
return true
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) Field0DeepEqual(src []*TDiskTrashInfo) bool {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) Field0DeepEqual(src *TCheckWarmUpCacheAsyncResponse) bool {
- if len(p.Success) != len(src) {
+ if !p.Success.DeepEqual(src) {
return false
}
- for i, v := range p.Success {
- _src := src[i]
- if !v.DeepEqual(_src) {
- return false
- }
- }
return true
}
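
The old `GetDiskTrashUsedCapacityResult` above (and the `SubmitRoutineLoadTaskArgs` that follows) carried `list<struct>` fields, which is why their `ReadField`/`writeField` bodies contain the `ReadListBegin` / loop / `ReadListEnd` sequence that the replacement struct-typed fields drop. A sketch of that list-decoding pattern, using a stand-in `listProto` interface and a simplified element type instead of the real `TDiskTrashInfo`/`TRoutineLoadTask`:

```go
package sketch

// elem stands in for a generated element type such as TRoutineLoadTask.
type elem struct{ ID int64 }

// listProto is a hand-written stand-in for the list-related subset of
// thrift.TProtocol that the removed list fields used.
type listProto interface {
	ReadListBegin() (elemType int8, size int, err error)
	ReadListEnd() error
	ReadI64() (int64, error)
}

// readElemList mirrors the pattern the old generated code used for fields
// declared as list<struct> in the IDL: read the list header, pre-size the
// slice, decode each element, then read the list end marker.
func readElemList(iprot listProto) ([]*elem, error) {
	_, size, err := iprot.ReadListBegin()
	if err != nil {
		return nil, err
	}
	out := make([]*elem, 0, size)
	for i := 0; i < size; i++ {
		id, err := iprot.ReadI64() // the real code calls _elem.Read(iprot)
		if err != nil {
			return nil, err
		}
		out = append(out, &elem{ID: id})
	}
	if err := iprot.ReadListEnd(); err != nil {
		return nil, err
	}
	return out, nil
}
```
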
-type BackendServiceSubmitRoutineLoadTaskArgs struct {
- Tasks []*TRoutineLoadTask `thrift:"tasks,1" frugal:"1,default,list" json:"tasks"`
+type BackendServiceSyncLoadForTabletsArgs struct {
+ Request *TSyncLoadForTabletsRequest `thrift:"request,1" frugal:"1,default,TSyncLoadForTabletsRequest" json:"request"`
}
-func NewBackendServiceSubmitRoutineLoadTaskArgs() *BackendServiceSubmitRoutineLoadTaskArgs {
- return &BackendServiceSubmitRoutineLoadTaskArgs{}
+func NewBackendServiceSyncLoadForTabletsArgs() *BackendServiceSyncLoadForTabletsArgs {
+ return &BackendServiceSyncLoadForTabletsArgs{}
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) InitDefault() {
- *p = BackendServiceSubmitRoutineLoadTaskArgs{}
+func (p *BackendServiceSyncLoadForTabletsArgs) InitDefault() {
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) GetTasks() (v []*TRoutineLoadTask) {
- return p.Tasks
+var BackendServiceSyncLoadForTabletsArgs_Request_DEFAULT *TSyncLoadForTabletsRequest
+
+func (p *BackendServiceSyncLoadForTabletsArgs) GetRequest() (v *TSyncLoadForTabletsRequest) {
+ if !p.IsSetRequest() {
+ return BackendServiceSyncLoadForTabletsArgs_Request_DEFAULT
+ }
+ return p.Request
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) SetTasks(val []*TRoutineLoadTask) {
- p.Tasks = val
+func (p *BackendServiceSyncLoadForTabletsArgs) SetRequest(val *TSyncLoadForTabletsRequest) {
+ p.Request = val
}
-var fieldIDToName_BackendServiceSubmitRoutineLoadTaskArgs = map[int16]string{
- 1: "tasks",
+var fieldIDToName_BackendServiceSyncLoadForTabletsArgs = map[int16]string{
+ 1: "request",
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceSyncLoadForTabletsArgs) IsSetRequest() bool {
+ return p.Request != nil
+}
+
+func (p *BackendServiceSyncLoadForTabletsArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -12204,21 +24063,18 @@ func (p *BackendServiceSubmitRoutineLoadTaskArgs) Read(iprot thrift.TProtocol) (
switch fieldId {
case 1:
- if fieldTypeId == thrift.LIST {
+ if fieldTypeId == thrift.STRUCT {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -12233,7 +24089,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskArgs[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSyncLoadForTabletsArgs[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -12243,29 +24099,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) ReadField1(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return err
- }
- p.Tasks = make([]*TRoutineLoadTask, 0, size)
- for i := 0; i < size; i++ {
- _elem := NewTRoutineLoadTask()
- if err := _elem.Read(iprot); err != nil {
- return err
- }
-
- p.Tasks = append(p.Tasks, _elem)
- }
- if err := iprot.ReadListEnd(); err != nil {
+func (p *BackendServiceSyncLoadForTabletsArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := NewTSyncLoadForTabletsRequest()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Request = _field
return nil
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceSyncLoadForTabletsArgs) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("submit_routine_load_task_args"); err != nil {
+ if err = oprot.WriteStructBegin("sync_load_for_tablets_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -12273,7 +24118,6 @@ func (p *BackendServiceSubmitRoutineLoadTaskArgs) Write(oprot thrift.TProtocol)
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -12292,19 +24136,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("tasks", thrift.LIST, 1); err != nil {
+func (p *BackendServiceSyncLoadForTabletsArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tasks)); err != nil {
- return err
- }
- for _, v := range p.Tasks {
- if err := v.Write(oprot); err != nil {
- return err
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
+ if err := p.Request.Write(oprot); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -12317,72 +24153,66 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) String() string {
+func (p *BackendServiceSyncLoadForTabletsArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceSubmitRoutineLoadTaskArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceSyncLoadForTabletsArgs(%+v)", *p)
+
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) DeepEqual(ano *BackendServiceSubmitRoutineLoadTaskArgs) bool {
+func (p *BackendServiceSyncLoadForTabletsArgs) DeepEqual(ano *BackendServiceSyncLoadForTabletsArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.Tasks) {
+ if !p.Field1DeepEqual(ano.Request) {
return false
}
return true
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) Field1DeepEqual(src []*TRoutineLoadTask) bool {
+func (p *BackendServiceSyncLoadForTabletsArgs) Field1DeepEqual(src *TSyncLoadForTabletsRequest) bool {
- if len(p.Tasks) != len(src) {
+ if !p.Request.DeepEqual(src) {
return false
}
- for i, v := range p.Tasks {
- _src := src[i]
- if !v.DeepEqual(_src) {
- return false
- }
- }
return true
}
-type BackendServiceSubmitRoutineLoadTaskResult struct {
- Success *status.TStatus `thrift:"success,0,optional" frugal:"0,optional,status.TStatus" json:"success,omitempty"`
+type BackendServiceSyncLoadForTabletsResult struct {
+ Success *TSyncLoadForTabletsResponse `thrift:"success,0,optional" frugal:"0,optional,TSyncLoadForTabletsResponse" json:"success,omitempty"`
}
-func NewBackendServiceSubmitRoutineLoadTaskResult() *BackendServiceSubmitRoutineLoadTaskResult {
- return &BackendServiceSubmitRoutineLoadTaskResult{}
+func NewBackendServiceSyncLoadForTabletsResult() *BackendServiceSyncLoadForTabletsResult {
+ return &BackendServiceSyncLoadForTabletsResult{}
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) InitDefault() {
- *p = BackendServiceSubmitRoutineLoadTaskResult{}
+func (p *BackendServiceSyncLoadForTabletsResult) InitDefault() {
}
-var BackendServiceSubmitRoutineLoadTaskResult_Success_DEFAULT *status.TStatus
+var BackendServiceSyncLoadForTabletsResult_Success_DEFAULT *TSyncLoadForTabletsResponse
-func (p *BackendServiceSubmitRoutineLoadTaskResult) GetSuccess() (v *status.TStatus) {
+func (p *BackendServiceSyncLoadForTabletsResult) GetSuccess() (v *TSyncLoadForTabletsResponse) {
if !p.IsSetSuccess() {
- return BackendServiceSubmitRoutineLoadTaskResult_Success_DEFAULT
+ return BackendServiceSyncLoadForTabletsResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) SetSuccess(x interface{}) {
- p.Success = x.(*status.TStatus)
+func (p *BackendServiceSyncLoadForTabletsResult) SetSuccess(x interface{}) {
+ p.Success = x.(*TSyncLoadForTabletsResponse)
}
-var fieldIDToName_BackendServiceSubmitRoutineLoadTaskResult = map[int16]string{
+var fieldIDToName_BackendServiceSyncLoadForTabletsResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) IsSetSuccess() bool {
+func (p *BackendServiceSyncLoadForTabletsResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceSyncLoadForTabletsResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -12406,17 +24236,14 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) Read(iprot thrift.TProtocol)
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -12431,7 +24258,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSyncLoadForTabletsResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -12441,17 +24268,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = status.NewTStatus()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceSyncLoadForTabletsResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := NewTSyncLoadForTabletsResponse()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceSyncLoadForTabletsResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("submit_routine_load_task_result"); err != nil {
+ if err = oprot.WriteStructBegin("sync_load_for_tablets_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -12459,7 +24287,6 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) Write(oprot thrift.TProtocol
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -12478,7 +24305,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceSyncLoadForTabletsResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -12497,14 +24324,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) String() string {
+func (p *BackendServiceSyncLoadForTabletsResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceSubmitRoutineLoadTaskResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceSyncLoadForTabletsResult(%+v)", *p)
+
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) DeepEqual(ano *BackendServiceSubmitRoutineLoadTaskResult) bool {
+func (p *BackendServiceSyncLoadForTabletsResult) DeepEqual(ano *BackendServiceSyncLoadForTabletsResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -12516,7 +24344,7 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) DeepEqual(ano *BackendServic
return true
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) Field0DeepEqual(src *status.TStatus) bool {
+func (p *BackendServiceSyncLoadForTabletsResult) Field0DeepEqual(src *TSyncLoadForTabletsResponse) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -12524,39 +24352,38 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) Field0DeepEqual(src *status.
return true
}
-type BackendServiceOpenScannerArgs struct {
- Params *dorisexternalservice.TScanOpenParams `thrift:"params,1" frugal:"1,default,dorisexternalservice.TScanOpenParams" json:"params"`
+type BackendServiceGetTopNHotPartitionsArgs struct {
+ Request *TGetTopNHotPartitionsRequest `thrift:"request,1" frugal:"1,default,TGetTopNHotPartitionsRequest" json:"request"`
}
-func NewBackendServiceOpenScannerArgs() *BackendServiceOpenScannerArgs {
- return &BackendServiceOpenScannerArgs{}
+func NewBackendServiceGetTopNHotPartitionsArgs() *BackendServiceGetTopNHotPartitionsArgs {
+ return &BackendServiceGetTopNHotPartitionsArgs{}
}
-func (p *BackendServiceOpenScannerArgs) InitDefault() {
- *p = BackendServiceOpenScannerArgs{}
+func (p *BackendServiceGetTopNHotPartitionsArgs) InitDefault() {
}
-var BackendServiceOpenScannerArgs_Params_DEFAULT *dorisexternalservice.TScanOpenParams
+var BackendServiceGetTopNHotPartitionsArgs_Request_DEFAULT *TGetTopNHotPartitionsRequest
-func (p *BackendServiceOpenScannerArgs) GetParams() (v *dorisexternalservice.TScanOpenParams) {
- if !p.IsSetParams() {
- return BackendServiceOpenScannerArgs_Params_DEFAULT
+func (p *BackendServiceGetTopNHotPartitionsArgs) GetRequest() (v *TGetTopNHotPartitionsRequest) {
+ if !p.IsSetRequest() {
+ return BackendServiceGetTopNHotPartitionsArgs_Request_DEFAULT
}
- return p.Params
+ return p.Request
}
-func (p *BackendServiceOpenScannerArgs) SetParams(val *dorisexternalservice.TScanOpenParams) {
- p.Params = val
+func (p *BackendServiceGetTopNHotPartitionsArgs) SetRequest(val *TGetTopNHotPartitionsRequest) {
+ p.Request = val
}
-var fieldIDToName_BackendServiceOpenScannerArgs = map[int16]string{
- 1: "params",
+var fieldIDToName_BackendServiceGetTopNHotPartitionsArgs = map[int16]string{
+ 1: "request",
}
-func (p *BackendServiceOpenScannerArgs) IsSetParams() bool {
- return p.Params != nil
+func (p *BackendServiceGetTopNHotPartitionsArgs) IsSetRequest() bool {
+ return p.Request != nil
}
-func (p *BackendServiceOpenScannerArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetTopNHotPartitionsArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -12580,17 +24407,14 @@ func (p *BackendServiceOpenScannerArgs) Read(iprot thrift.TProtocol) (err error)
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -12605,7 +24429,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerArgs[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTopNHotPartitionsArgs[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -12615,17 +24439,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceOpenScannerArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Params = dorisexternalservice.NewTScanOpenParams()
- if err := p.Params.Read(iprot); err != nil {
+func (p *BackendServiceGetTopNHotPartitionsArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := NewTGetTopNHotPartitionsRequest()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Request = _field
return nil
}
-func (p *BackendServiceOpenScannerArgs) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetTopNHotPartitionsArgs) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("open_scanner_args"); err != nil {
+ if err = oprot.WriteStructBegin("get_top_n_hot_partitions_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -12633,7 +24458,6 @@ func (p *BackendServiceOpenScannerArgs) Write(oprot thrift.TProtocol) (err error
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -12652,11 +24476,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceOpenScannerArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil {
+func (p *BackendServiceGetTopNHotPartitionsArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil {
goto WriteFieldBeginError
}
- if err := p.Params.Write(oprot); err != nil {
+ if err := p.Request.Write(oprot); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -12669,66 +24493,66 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServiceOpenScannerArgs) String() string {
+func (p *BackendServiceGetTopNHotPartitionsArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceOpenScannerArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetTopNHotPartitionsArgs(%+v)", *p)
+
}
-func (p *BackendServiceOpenScannerArgs) DeepEqual(ano *BackendServiceOpenScannerArgs) bool {
+func (p *BackendServiceGetTopNHotPartitionsArgs) DeepEqual(ano *BackendServiceGetTopNHotPartitionsArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.Params) {
+ if !p.Field1DeepEqual(ano.Request) {
return false
}
return true
}
-func (p *BackendServiceOpenScannerArgs) Field1DeepEqual(src *dorisexternalservice.TScanOpenParams) bool {
+func (p *BackendServiceGetTopNHotPartitionsArgs) Field1DeepEqual(src *TGetTopNHotPartitionsRequest) bool {
- if !p.Params.DeepEqual(src) {
+ if !p.Request.DeepEqual(src) {
return false
}
return true
}
-type BackendServiceOpenScannerResult struct {
- Success *dorisexternalservice.TScanOpenResult_ `thrift:"success,0,optional" frugal:"0,optional,dorisexternalservice.TScanOpenResult_" json:"success,omitempty"`
+type BackendServiceGetTopNHotPartitionsResult struct {
+ Success *TGetTopNHotPartitionsResponse `thrift:"success,0,optional" frugal:"0,optional,TGetTopNHotPartitionsResponse" json:"success,omitempty"`
}
-func NewBackendServiceOpenScannerResult() *BackendServiceOpenScannerResult {
- return &BackendServiceOpenScannerResult{}
+func NewBackendServiceGetTopNHotPartitionsResult() *BackendServiceGetTopNHotPartitionsResult {
+ return &BackendServiceGetTopNHotPartitionsResult{}
}
-func (p *BackendServiceOpenScannerResult) InitDefault() {
- *p = BackendServiceOpenScannerResult{}
+func (p *BackendServiceGetTopNHotPartitionsResult) InitDefault() {
}
-var BackendServiceOpenScannerResult_Success_DEFAULT *dorisexternalservice.TScanOpenResult_
+var BackendServiceGetTopNHotPartitionsResult_Success_DEFAULT *TGetTopNHotPartitionsResponse
-func (p *BackendServiceOpenScannerResult) GetSuccess() (v *dorisexternalservice.TScanOpenResult_) {
+func (p *BackendServiceGetTopNHotPartitionsResult) GetSuccess() (v *TGetTopNHotPartitionsResponse) {
if !p.IsSetSuccess() {
- return BackendServiceOpenScannerResult_Success_DEFAULT
+ return BackendServiceGetTopNHotPartitionsResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceOpenScannerResult) SetSuccess(x interface{}) {
- p.Success = x.(*dorisexternalservice.TScanOpenResult_)
+func (p *BackendServiceGetTopNHotPartitionsResult) SetSuccess(x interface{}) {
+ p.Success = x.(*TGetTopNHotPartitionsResponse)
}
-var fieldIDToName_BackendServiceOpenScannerResult = map[int16]string{
+var fieldIDToName_BackendServiceGetTopNHotPartitionsResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceOpenScannerResult) IsSetSuccess() bool {
+func (p *BackendServiceGetTopNHotPartitionsResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceOpenScannerResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetTopNHotPartitionsResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -12752,17 +24576,14 @@ func (p *BackendServiceOpenScannerResult) Read(iprot thrift.TProtocol) (err erro
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -12777,7 +24598,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTopNHotPartitionsResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -12787,17 +24608,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceOpenScannerResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = dorisexternalservice.NewTScanOpenResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceGetTopNHotPartitionsResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := NewTGetTopNHotPartitionsResponse()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceOpenScannerResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetTopNHotPartitionsResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("open_scanner_result"); err != nil {
+ if err = oprot.WriteStructBegin("get_top_n_hot_partitions_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -12805,7 +24627,6 @@ func (p *BackendServiceOpenScannerResult) Write(oprot thrift.TProtocol) (err err
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -12824,7 +24645,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceOpenScannerResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetTopNHotPartitionsResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -12843,14 +24664,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceOpenScannerResult) String() string {
+func (p *BackendServiceGetTopNHotPartitionsResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceOpenScannerResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetTopNHotPartitionsResult(%+v)", *p)
+
}
-func (p *BackendServiceOpenScannerResult) DeepEqual(ano *BackendServiceOpenScannerResult) bool {
+func (p *BackendServiceGetTopNHotPartitionsResult) DeepEqual(ano *BackendServiceGetTopNHotPartitionsResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -12862,7 +24684,7 @@ func (p *BackendServiceOpenScannerResult) DeepEqual(ano *BackendServiceOpenScann
return true
}
-func (p *BackendServiceOpenScannerResult) Field0DeepEqual(src *dorisexternalservice.TScanOpenResult_) bool {
+func (p *BackendServiceGetTopNHotPartitionsResult) Field0DeepEqual(src *TGetTopNHotPartitionsResponse) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -12870,39 +24692,38 @@ func (p *BackendServiceOpenScannerResult) Field0DeepEqual(src *dorisexternalserv
return true
}
-type BackendServiceGetNextArgs struct {
- Params *dorisexternalservice.TScanNextBatchParams `thrift:"params,1" frugal:"1,default,dorisexternalservice.TScanNextBatchParams" json:"params"`
+type BackendServiceWarmUpTabletsArgs struct {
+ Request *TWarmUpTabletsRequest `thrift:"request,1" frugal:"1,default,TWarmUpTabletsRequest" json:"request"`
}
-func NewBackendServiceGetNextArgs() *BackendServiceGetNextArgs {
- return &BackendServiceGetNextArgs{}
+func NewBackendServiceWarmUpTabletsArgs() *BackendServiceWarmUpTabletsArgs {
+ return &BackendServiceWarmUpTabletsArgs{}
}
-func (p *BackendServiceGetNextArgs) InitDefault() {
- *p = BackendServiceGetNextArgs{}
+func (p *BackendServiceWarmUpTabletsArgs) InitDefault() {
}
-var BackendServiceGetNextArgs_Params_DEFAULT *dorisexternalservice.TScanNextBatchParams
+var BackendServiceWarmUpTabletsArgs_Request_DEFAULT *TWarmUpTabletsRequest
-func (p *BackendServiceGetNextArgs) GetParams() (v *dorisexternalservice.TScanNextBatchParams) {
- if !p.IsSetParams() {
- return BackendServiceGetNextArgs_Params_DEFAULT
+func (p *BackendServiceWarmUpTabletsArgs) GetRequest() (v *TWarmUpTabletsRequest) {
+ if !p.IsSetRequest() {
+ return BackendServiceWarmUpTabletsArgs_Request_DEFAULT
}
- return p.Params
+ return p.Request
}
-func (p *BackendServiceGetNextArgs) SetParams(val *dorisexternalservice.TScanNextBatchParams) {
- p.Params = val
+func (p *BackendServiceWarmUpTabletsArgs) SetRequest(val *TWarmUpTabletsRequest) {
+ p.Request = val
}
-var fieldIDToName_BackendServiceGetNextArgs = map[int16]string{
- 1: "params",
+var fieldIDToName_BackendServiceWarmUpTabletsArgs = map[int16]string{
+ 1: "request",
}
-func (p *BackendServiceGetNextArgs) IsSetParams() bool {
- return p.Params != nil
+func (p *BackendServiceWarmUpTabletsArgs) IsSetRequest() bool {
+ return p.Request != nil
}
-func (p *BackendServiceGetNextArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceWarmUpTabletsArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -12926,17 +24747,14 @@ func (p *BackendServiceGetNextArgs) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -12951,7 +24769,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextArgs[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpTabletsArgs[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -12961,17 +24779,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetNextArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Params = dorisexternalservice.NewTScanNextBatchParams()
- if err := p.Params.Read(iprot); err != nil {
+func (p *BackendServiceWarmUpTabletsArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := NewTWarmUpTabletsRequest()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Request = _field
return nil
}
-func (p *BackendServiceGetNextArgs) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceWarmUpTabletsArgs) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("get_next_args"); err != nil {
+ if err = oprot.WriteStructBegin("warm_up_tablets_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -12979,7 +24798,6 @@ func (p *BackendServiceGetNextArgs) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -12998,11 +24816,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceGetNextArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil {
+func (p *BackendServiceWarmUpTabletsArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil {
goto WriteFieldBeginError
}
- if err := p.Params.Write(oprot); err != nil {
+ if err := p.Request.Write(oprot); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -13015,66 +24833,66 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServiceGetNextArgs) String() string {
+func (p *BackendServiceWarmUpTabletsArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceGetNextArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceWarmUpTabletsArgs(%+v)", *p)
+
}
-func (p *BackendServiceGetNextArgs) DeepEqual(ano *BackendServiceGetNextArgs) bool {
+func (p *BackendServiceWarmUpTabletsArgs) DeepEqual(ano *BackendServiceWarmUpTabletsArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.Params) {
+ if !p.Field1DeepEqual(ano.Request) {
return false
}
return true
}
-func (p *BackendServiceGetNextArgs) Field1DeepEqual(src *dorisexternalservice.TScanNextBatchParams) bool {
+func (p *BackendServiceWarmUpTabletsArgs) Field1DeepEqual(src *TWarmUpTabletsRequest) bool {
- if !p.Params.DeepEqual(src) {
+ if !p.Request.DeepEqual(src) {
return false
}
return true
}
-type BackendServiceGetNextResult struct {
- Success *dorisexternalservice.TScanBatchResult_ `thrift:"success,0,optional" frugal:"0,optional,dorisexternalservice.TScanBatchResult_" json:"success,omitempty"`
+type BackendServiceWarmUpTabletsResult struct {
+ Success *TWarmUpTabletsResponse `thrift:"success,0,optional" frugal:"0,optional,TWarmUpTabletsResponse" json:"success,omitempty"`
}
-func NewBackendServiceGetNextResult() *BackendServiceGetNextResult {
- return &BackendServiceGetNextResult{}
+func NewBackendServiceWarmUpTabletsResult() *BackendServiceWarmUpTabletsResult {
+ return &BackendServiceWarmUpTabletsResult{}
}
-func (p *BackendServiceGetNextResult) InitDefault() {
- *p = BackendServiceGetNextResult{}
+func (p *BackendServiceWarmUpTabletsResult) InitDefault() {
}
-var BackendServiceGetNextResult_Success_DEFAULT *dorisexternalservice.TScanBatchResult_
+var BackendServiceWarmUpTabletsResult_Success_DEFAULT *TWarmUpTabletsResponse
-func (p *BackendServiceGetNextResult) GetSuccess() (v *dorisexternalservice.TScanBatchResult_) {
+func (p *BackendServiceWarmUpTabletsResult) GetSuccess() (v *TWarmUpTabletsResponse) {
if !p.IsSetSuccess() {
- return BackendServiceGetNextResult_Success_DEFAULT
+ return BackendServiceWarmUpTabletsResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceGetNextResult) SetSuccess(x interface{}) {
- p.Success = x.(*dorisexternalservice.TScanBatchResult_)
+func (p *BackendServiceWarmUpTabletsResult) SetSuccess(x interface{}) {
+ p.Success = x.(*TWarmUpTabletsResponse)
}
-var fieldIDToName_BackendServiceGetNextResult = map[int16]string{
+var fieldIDToName_BackendServiceWarmUpTabletsResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceGetNextResult) IsSetSuccess() bool {
+func (p *BackendServiceWarmUpTabletsResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceGetNextResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceWarmUpTabletsResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -13098,17 +24916,14 @@ func (p *BackendServiceGetNextResult) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -13123,7 +24938,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpTabletsResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -13133,17 +24948,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetNextResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = dorisexternalservice.NewTScanBatchResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceWarmUpTabletsResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := NewTWarmUpTabletsResponse()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceGetNextResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceWarmUpTabletsResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("get_next_result"); err != nil {
+ if err = oprot.WriteStructBegin("warm_up_tablets_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -13151,7 +24967,6 @@ func (p *BackendServiceGetNextResult) Write(oprot thrift.TProtocol) (err error)
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -13170,7 +24985,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceGetNextResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceWarmUpTabletsResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -13189,14 +25004,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceGetNextResult) String() string {
+func (p *BackendServiceWarmUpTabletsResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceGetNextResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceWarmUpTabletsResult(%+v)", *p)
+
}
-func (p *BackendServiceGetNextResult) DeepEqual(ano *BackendServiceGetNextResult) bool {
+func (p *BackendServiceWarmUpTabletsResult) DeepEqual(ano *BackendServiceWarmUpTabletsResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -13208,7 +25024,7 @@ func (p *BackendServiceGetNextResult) DeepEqual(ano *BackendServiceGetNextResult
return true
}
-func (p *BackendServiceGetNextResult) Field0DeepEqual(src *dorisexternalservice.TScanBatchResult_) bool {
+func (p *BackendServiceWarmUpTabletsResult) Field0DeepEqual(src *TWarmUpTabletsResponse) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -13216,39 +25032,38 @@ func (p *BackendServiceGetNextResult) Field0DeepEqual(src *dorisexternalservice.
return true
}
-type BackendServiceCloseScannerArgs struct {
- Params *dorisexternalservice.TScanCloseParams `thrift:"params,1" frugal:"1,default,dorisexternalservice.TScanCloseParams" json:"params"`
+type BackendServiceIngestBinlogArgs struct {
+ IngestBinlogRequest *TIngestBinlogRequest `thrift:"ingest_binlog_request,1" frugal:"1,default,TIngestBinlogRequest" json:"ingest_binlog_request"`
}
-func NewBackendServiceCloseScannerArgs() *BackendServiceCloseScannerArgs {
- return &BackendServiceCloseScannerArgs{}
+func NewBackendServiceIngestBinlogArgs() *BackendServiceIngestBinlogArgs {
+ return &BackendServiceIngestBinlogArgs{}
}
-func (p *BackendServiceCloseScannerArgs) InitDefault() {
- *p = BackendServiceCloseScannerArgs{}
+func (p *BackendServiceIngestBinlogArgs) InitDefault() {
}
-var BackendServiceCloseScannerArgs_Params_DEFAULT *dorisexternalservice.TScanCloseParams
+var BackendServiceIngestBinlogArgs_IngestBinlogRequest_DEFAULT *TIngestBinlogRequest
-func (p *BackendServiceCloseScannerArgs) GetParams() (v *dorisexternalservice.TScanCloseParams) {
- if !p.IsSetParams() {
- return BackendServiceCloseScannerArgs_Params_DEFAULT
+func (p *BackendServiceIngestBinlogArgs) GetIngestBinlogRequest() (v *TIngestBinlogRequest) {
+ if !p.IsSetIngestBinlogRequest() {
+ return BackendServiceIngestBinlogArgs_IngestBinlogRequest_DEFAULT
}
- return p.Params
+ return p.IngestBinlogRequest
}
-func (p *BackendServiceCloseScannerArgs) SetParams(val *dorisexternalservice.TScanCloseParams) {
- p.Params = val
+func (p *BackendServiceIngestBinlogArgs) SetIngestBinlogRequest(val *TIngestBinlogRequest) {
+ p.IngestBinlogRequest = val
}
-var fieldIDToName_BackendServiceCloseScannerArgs = map[int16]string{
- 1: "params",
+var fieldIDToName_BackendServiceIngestBinlogArgs = map[int16]string{
+ 1: "ingest_binlog_request",
}
-func (p *BackendServiceCloseScannerArgs) IsSetParams() bool {
- return p.Params != nil
+func (p *BackendServiceIngestBinlogArgs) IsSetIngestBinlogRequest() bool {
+ return p.IngestBinlogRequest != nil
}
-func (p *BackendServiceCloseScannerArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceIngestBinlogArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -13272,17 +25087,14 @@ func (p *BackendServiceCloseScannerArgs) Read(iprot thrift.TProtocol) (err error
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -13297,7 +25109,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerArgs[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogArgs[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -13307,17 +25119,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceCloseScannerArgs) ReadField1(iprot thrift.TProtocol) error {
- p.Params = dorisexternalservice.NewTScanCloseParams()
- if err := p.Params.Read(iprot); err != nil {
+func (p *BackendServiceIngestBinlogArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := NewTIngestBinlogRequest()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.IngestBinlogRequest = _field
return nil
}
-func (p *BackendServiceCloseScannerArgs) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceIngestBinlogArgs) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("close_scanner_args"); err != nil {
+ if err = oprot.WriteStructBegin("ingest_binlog_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -13325,7 +25138,6 @@ func (p *BackendServiceCloseScannerArgs) Write(oprot thrift.TProtocol) (err erro
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -13344,11 +25156,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceCloseScannerArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("params", thrift.STRUCT, 1); err != nil {
+func (p *BackendServiceIngestBinlogArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("ingest_binlog_request", thrift.STRUCT, 1); err != nil {
goto WriteFieldBeginError
}
- if err := p.Params.Write(oprot); err != nil {
+ if err := p.IngestBinlogRequest.Write(oprot); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -13361,66 +25173,66 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServiceCloseScannerArgs) String() string {
+func (p *BackendServiceIngestBinlogArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceCloseScannerArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceIngestBinlogArgs(%+v)", *p)
+
}
-func (p *BackendServiceCloseScannerArgs) DeepEqual(ano *BackendServiceCloseScannerArgs) bool {
+func (p *BackendServiceIngestBinlogArgs) DeepEqual(ano *BackendServiceIngestBinlogArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.Params) {
+ if !p.Field1DeepEqual(ano.IngestBinlogRequest) {
return false
}
return true
}
-func (p *BackendServiceCloseScannerArgs) Field1DeepEqual(src *dorisexternalservice.TScanCloseParams) bool {
+func (p *BackendServiceIngestBinlogArgs) Field1DeepEqual(src *TIngestBinlogRequest) bool {
- if !p.Params.DeepEqual(src) {
+ if !p.IngestBinlogRequest.DeepEqual(src) {
return false
}
return true
}
-type BackendServiceCloseScannerResult struct {
- Success *dorisexternalservice.TScanCloseResult_ `thrift:"success,0,optional" frugal:"0,optional,dorisexternalservice.TScanCloseResult_" json:"success,omitempty"`
+type BackendServiceIngestBinlogResult struct {
+ Success *TIngestBinlogResult_ `thrift:"success,0,optional" frugal:"0,optional,TIngestBinlogResult_" json:"success,omitempty"`
}
-func NewBackendServiceCloseScannerResult() *BackendServiceCloseScannerResult {
- return &BackendServiceCloseScannerResult{}
+func NewBackendServiceIngestBinlogResult() *BackendServiceIngestBinlogResult {
+ return &BackendServiceIngestBinlogResult{}
}
-func (p *BackendServiceCloseScannerResult) InitDefault() {
- *p = BackendServiceCloseScannerResult{}
+func (p *BackendServiceIngestBinlogResult) InitDefault() {
}
-var BackendServiceCloseScannerResult_Success_DEFAULT *dorisexternalservice.TScanCloseResult_
+var BackendServiceIngestBinlogResult_Success_DEFAULT *TIngestBinlogResult_
-func (p *BackendServiceCloseScannerResult) GetSuccess() (v *dorisexternalservice.TScanCloseResult_) {
+func (p *BackendServiceIngestBinlogResult) GetSuccess() (v *TIngestBinlogResult_) {
if !p.IsSetSuccess() {
- return BackendServiceCloseScannerResult_Success_DEFAULT
+ return BackendServiceIngestBinlogResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceCloseScannerResult) SetSuccess(x interface{}) {
- p.Success = x.(*dorisexternalservice.TScanCloseResult_)
+func (p *BackendServiceIngestBinlogResult) SetSuccess(x interface{}) {
+ p.Success = x.(*TIngestBinlogResult_)
}
-var fieldIDToName_BackendServiceCloseScannerResult = map[int16]string{
+var fieldIDToName_BackendServiceIngestBinlogResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceCloseScannerResult) IsSetSuccess() bool {
+func (p *BackendServiceIngestBinlogResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceCloseScannerResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceIngestBinlogResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -13444,17 +25256,14 @@ func (p *BackendServiceCloseScannerResult) Read(iprot thrift.TProtocol) (err err
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -13469,7 +25278,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -13479,17 +25288,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceCloseScannerResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = dorisexternalservice.NewTScanCloseResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceIngestBinlogResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := NewTIngestBinlogResult_()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceCloseScannerResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceIngestBinlogResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("close_scanner_result"); err != nil {
+ if err = oprot.WriteStructBegin("ingest_binlog_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -13497,7 +25307,6 @@ func (p *BackendServiceCloseScannerResult) Write(oprot thrift.TProtocol) (err er
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -13516,7 +25325,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceCloseScannerResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceIngestBinlogResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -13535,14 +25344,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceCloseScannerResult) String() string {
+func (p *BackendServiceIngestBinlogResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceCloseScannerResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceIngestBinlogResult(%+v)", *p)
+
}
-func (p *BackendServiceCloseScannerResult) DeepEqual(ano *BackendServiceCloseScannerResult) bool {
+func (p *BackendServiceIngestBinlogResult) DeepEqual(ano *BackendServiceIngestBinlogResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -13554,7 +25364,7 @@ func (p *BackendServiceCloseScannerResult) DeepEqual(ano *BackendServiceCloseSca
return true
}
-func (p *BackendServiceCloseScannerResult) Field0DeepEqual(src *dorisexternalservice.TScanCloseResult_) bool {
+func (p *BackendServiceIngestBinlogResult) Field0DeepEqual(src *TIngestBinlogResult_) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -13562,30 +25372,38 @@ func (p *BackendServiceCloseScannerResult) Field0DeepEqual(src *dorisexternalser
return true
}
-type BackendServiceGetStreamLoadRecordArgs struct {
- LastStreamRecordTime int64 `thrift:"last_stream_record_time,1" frugal:"1,default,i64" json:"last_stream_record_time"`
+type BackendServiceQueryIngestBinlogArgs struct {
+ QueryIngestBinlogRequest *TQueryIngestBinlogRequest `thrift:"query_ingest_binlog_request,1" frugal:"1,default,TQueryIngestBinlogRequest" json:"query_ingest_binlog_request"`
}
-func NewBackendServiceGetStreamLoadRecordArgs() *BackendServiceGetStreamLoadRecordArgs {
- return &BackendServiceGetStreamLoadRecordArgs{}
+func NewBackendServiceQueryIngestBinlogArgs() *BackendServiceQueryIngestBinlogArgs {
+ return &BackendServiceQueryIngestBinlogArgs{}
}
-func (p *BackendServiceGetStreamLoadRecordArgs) InitDefault() {
- *p = BackendServiceGetStreamLoadRecordArgs{}
+func (p *BackendServiceQueryIngestBinlogArgs) InitDefault() {
}
-func (p *BackendServiceGetStreamLoadRecordArgs) GetLastStreamRecordTime() (v int64) {
- return p.LastStreamRecordTime
+var BackendServiceQueryIngestBinlogArgs_QueryIngestBinlogRequest_DEFAULT *TQueryIngestBinlogRequest
+
+func (p *BackendServiceQueryIngestBinlogArgs) GetQueryIngestBinlogRequest() (v *TQueryIngestBinlogRequest) {
+ if !p.IsSetQueryIngestBinlogRequest() {
+ return BackendServiceQueryIngestBinlogArgs_QueryIngestBinlogRequest_DEFAULT
+ }
+ return p.QueryIngestBinlogRequest
}
-func (p *BackendServiceGetStreamLoadRecordArgs) SetLastStreamRecordTime(val int64) {
- p.LastStreamRecordTime = val
+func (p *BackendServiceQueryIngestBinlogArgs) SetQueryIngestBinlogRequest(val *TQueryIngestBinlogRequest) {
+ p.QueryIngestBinlogRequest = val
}
-var fieldIDToName_BackendServiceGetStreamLoadRecordArgs = map[int16]string{
- 1: "last_stream_record_time",
+var fieldIDToName_BackendServiceQueryIngestBinlogArgs = map[int16]string{
+ 1: "query_ingest_binlog_request",
}
-func (p *BackendServiceGetStreamLoadRecordArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceQueryIngestBinlogArgs) IsSetQueryIngestBinlogRequest() bool {
+ return p.QueryIngestBinlogRequest != nil
+}
+
+func (p *BackendServiceQueryIngestBinlogArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -13605,21 +25423,18 @@ func (p *BackendServiceGetStreamLoadRecordArgs) Read(iprot thrift.TProtocol) (er
switch fieldId {
case 1:
- if fieldTypeId == thrift.I64 {
+ if fieldTypeId == thrift.STRUCT {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -13634,7 +25449,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordArgs[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceQueryIngestBinlogArgs[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -13644,18 +25459,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetStreamLoadRecordArgs) ReadField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
+func (p *BackendServiceQueryIngestBinlogArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := NewTQueryIngestBinlogRequest()
+ if err := _field.Read(iprot); err != nil {
return err
- } else {
- p.LastStreamRecordTime = v
}
+ p.QueryIngestBinlogRequest = _field
return nil
}
-func (p *BackendServiceGetStreamLoadRecordArgs) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceQueryIngestBinlogArgs) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("get_stream_load_record_args"); err != nil {
+ if err = oprot.WriteStructBegin("query_ingest_binlog_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -13663,7 +25478,6 @@ func (p *BackendServiceGetStreamLoadRecordArgs) Write(oprot thrift.TProtocol) (e
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -13682,11 +25496,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceGetStreamLoadRecordArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("last_stream_record_time", thrift.I64, 1); err != nil {
+func (p *BackendServiceQueryIngestBinlogArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("query_ingest_binlog_request", thrift.STRUCT, 1); err != nil {
goto WriteFieldBeginError
}
- if err := oprot.WriteI64(p.LastStreamRecordTime); err != nil {
+ if err := p.QueryIngestBinlogRequest.Write(oprot); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -13699,66 +25513,66 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServiceGetStreamLoadRecordArgs) String() string {
+func (p *BackendServiceQueryIngestBinlogArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceGetStreamLoadRecordArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceQueryIngestBinlogArgs(%+v)", *p)
+
}
-func (p *BackendServiceGetStreamLoadRecordArgs) DeepEqual(ano *BackendServiceGetStreamLoadRecordArgs) bool {
+func (p *BackendServiceQueryIngestBinlogArgs) DeepEqual(ano *BackendServiceQueryIngestBinlogArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.LastStreamRecordTime) {
+ if !p.Field1DeepEqual(ano.QueryIngestBinlogRequest) {
return false
}
return true
}
-func (p *BackendServiceGetStreamLoadRecordArgs) Field1DeepEqual(src int64) bool {
+func (p *BackendServiceQueryIngestBinlogArgs) Field1DeepEqual(src *TQueryIngestBinlogRequest) bool {
- if p.LastStreamRecordTime != src {
+ if !p.QueryIngestBinlogRequest.DeepEqual(src) {
return false
}
return true
}
-type BackendServiceGetStreamLoadRecordResult struct {
- Success *TStreamLoadRecordResult_ `thrift:"success,0,optional" frugal:"0,optional,TStreamLoadRecordResult_" json:"success,omitempty"`
+type BackendServiceQueryIngestBinlogResult struct {
+ Success *TQueryIngestBinlogResult_ `thrift:"success,0,optional" frugal:"0,optional,TQueryIngestBinlogResult_" json:"success,omitempty"`
}
-func NewBackendServiceGetStreamLoadRecordResult() *BackendServiceGetStreamLoadRecordResult {
- return &BackendServiceGetStreamLoadRecordResult{}
+func NewBackendServiceQueryIngestBinlogResult() *BackendServiceQueryIngestBinlogResult {
+ return &BackendServiceQueryIngestBinlogResult{}
}
-func (p *BackendServiceGetStreamLoadRecordResult) InitDefault() {
- *p = BackendServiceGetStreamLoadRecordResult{}
+func (p *BackendServiceQueryIngestBinlogResult) InitDefault() {
}
-var BackendServiceGetStreamLoadRecordResult_Success_DEFAULT *TStreamLoadRecordResult_
+var BackendServiceQueryIngestBinlogResult_Success_DEFAULT *TQueryIngestBinlogResult_
-func (p *BackendServiceGetStreamLoadRecordResult) GetSuccess() (v *TStreamLoadRecordResult_) {
+func (p *BackendServiceQueryIngestBinlogResult) GetSuccess() (v *TQueryIngestBinlogResult_) {
if !p.IsSetSuccess() {
- return BackendServiceGetStreamLoadRecordResult_Success_DEFAULT
+ return BackendServiceQueryIngestBinlogResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceGetStreamLoadRecordResult) SetSuccess(x interface{}) {
- p.Success = x.(*TStreamLoadRecordResult_)
+func (p *BackendServiceQueryIngestBinlogResult) SetSuccess(x interface{}) {
+ p.Success = x.(*TQueryIngestBinlogResult_)
}
-var fieldIDToName_BackendServiceGetStreamLoadRecordResult = map[int16]string{
+var fieldIDToName_BackendServiceQueryIngestBinlogResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceGetStreamLoadRecordResult) IsSetSuccess() bool {
+func (p *BackendServiceQueryIngestBinlogResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceGetStreamLoadRecordResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceQueryIngestBinlogResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -13782,17 +25596,14 @@ func (p *BackendServiceGetStreamLoadRecordResult) Read(iprot thrift.TProtocol) (
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -13807,7 +25618,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceQueryIngestBinlogResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -13817,17 +25628,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetStreamLoadRecordResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = NewTStreamLoadRecordResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceQueryIngestBinlogResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := NewTQueryIngestBinlogResult_()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceGetStreamLoadRecordResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceQueryIngestBinlogResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("get_stream_load_record_result"); err != nil {
+ if err = oprot.WriteStructBegin("query_ingest_binlog_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -13835,7 +25647,6 @@ func (p *BackendServiceGetStreamLoadRecordResult) Write(oprot thrift.TProtocol)
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -13854,7 +25665,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceGetStreamLoadRecordResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceQueryIngestBinlogResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -13873,14 +25684,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceGetStreamLoadRecordResult) String() string {
+func (p *BackendServiceQueryIngestBinlogResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceGetStreamLoadRecordResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceQueryIngestBinlogResult(%+v)", *p)
+
}
-func (p *BackendServiceGetStreamLoadRecordResult) DeepEqual(ano *BackendServiceGetStreamLoadRecordResult) bool {
+func (p *BackendServiceQueryIngestBinlogResult) DeepEqual(ano *BackendServiceQueryIngestBinlogResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -13892,7 +25704,7 @@ func (p *BackendServiceGetStreamLoadRecordResult) DeepEqual(ano *BackendServiceG
return true
}
-func (p *BackendServiceGetStreamLoadRecordResult) Field0DeepEqual(src *TStreamLoadRecordResult_) bool {
+func (p *BackendServiceQueryIngestBinlogResult) Field0DeepEqual(src *TQueryIngestBinlogResult_) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -13900,114 +25712,38 @@ func (p *BackendServiceGetStreamLoadRecordResult) Field0DeepEqual(src *TStreamLo
return true
}
-type BackendServiceCleanTrashArgs struct {
+type BackendServicePublishTopicInfoArgs struct {
+ TopicRequest *TPublishTopicRequest `thrift:"topic_request,1" frugal:"1,default,TPublishTopicRequest" json:"topic_request"`
}
-func NewBackendServiceCleanTrashArgs() *BackendServiceCleanTrashArgs {
- return &BackendServiceCleanTrashArgs{}
-}
-
-func (p *BackendServiceCleanTrashArgs) InitDefault() {
- *p = BackendServiceCleanTrashArgs{}
-}
-
-var fieldIDToName_BackendServiceCleanTrashArgs = map[int16]string{}
-
-func (p *BackendServiceCleanTrashArgs) Read(iprot thrift.TProtocol) (err error) {
-
- var fieldTypeId thrift.TType
- var fieldId int16
-
- if _, err = iprot.ReadStructBegin(); err != nil {
- goto ReadStructBeginError
- }
-
- for {
- _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
- if err != nil {
- goto ReadFieldBeginError
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldTypeError
- }
-
- if err = iprot.ReadFieldEnd(); err != nil {
- goto ReadFieldEndError
- }
- }
- if err = iprot.ReadStructEnd(); err != nil {
- goto ReadStructEndError
- }
-
- return nil
-ReadStructBeginError:
- return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
-ReadFieldBeginError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-SkipFieldTypeError:
- return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
-
-ReadFieldEndError:
- return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
-ReadStructEndError:
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+func NewBackendServicePublishTopicInfoArgs() *BackendServicePublishTopicInfoArgs {
+ return &BackendServicePublishTopicInfoArgs{}
}
-func (p *BackendServiceCleanTrashArgs) Write(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteStructBegin("clean_trash_args"); err != nil {
- goto WriteStructBeginError
- }
- if p != nil {
-
- }
- if err = oprot.WriteFieldStop(); err != nil {
- goto WriteFieldStopError
- }
- if err = oprot.WriteStructEnd(); err != nil {
- goto WriteStructEndError
- }
- return nil
-WriteStructBeginError:
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-WriteFieldStopError:
- return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
-WriteStructEndError:
- return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+func (p *BackendServicePublishTopicInfoArgs) InitDefault() {
}
-func (p *BackendServiceCleanTrashArgs) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("BackendServiceCleanTrashArgs(%+v)", *p)
-}
+var BackendServicePublishTopicInfoArgs_TopicRequest_DEFAULT *TPublishTopicRequest
-func (p *BackendServiceCleanTrashArgs) DeepEqual(ano *BackendServiceCleanTrashArgs) bool {
- if p == ano {
- return true
- } else if p == nil || ano == nil {
- return false
+func (p *BackendServicePublishTopicInfoArgs) GetTopicRequest() (v *TPublishTopicRequest) {
+ if !p.IsSetTopicRequest() {
+ return BackendServicePublishTopicInfoArgs_TopicRequest_DEFAULT
}
- return true
+ return p.TopicRequest
}
-
-type BackendServiceCheckStorageFormatArgs struct {
+func (p *BackendServicePublishTopicInfoArgs) SetTopicRequest(val *TPublishTopicRequest) {
+ p.TopicRequest = val
}
-func NewBackendServiceCheckStorageFormatArgs() *BackendServiceCheckStorageFormatArgs {
- return &BackendServiceCheckStorageFormatArgs{}
+var fieldIDToName_BackendServicePublishTopicInfoArgs = map[int16]string{
+ 1: "topic_request",
}
-func (p *BackendServiceCheckStorageFormatArgs) InitDefault() {
- *p = BackendServiceCheckStorageFormatArgs{}
+func (p *BackendServicePublishTopicInfoArgs) IsSetTopicRequest() bool {
+ return p.TopicRequest != nil
}
-var fieldIDToName_BackendServiceCheckStorageFormatArgs = map[int16]string{}
-
-func (p *BackendServiceCheckStorageFormatArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServicePublishTopicInfoArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -14024,10 +25760,21 @@ func (p *BackendServiceCheckStorageFormatArgs) Read(iprot thrift.TProtocol) (err
if fieldTypeId == thrift.STOP {
break
}
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldTypeError
- }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -14041,8 +25788,10 @@ ReadStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-SkipFieldTypeError:
- return thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishTopicInfoArgs[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
@@ -14050,12 +25799,25 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceCheckStorageFormatArgs) Write(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteStructBegin("check_storage_format_args"); err != nil {
+func (p *BackendServicePublishTopicInfoArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := NewTPublishTopicRequest()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.TopicRequest = _field
+ return nil
+}
+
+func (p *BackendServicePublishTopicInfoArgs) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("publish_topic_info_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
-
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -14066,61 +25828,91 @@ func (p *BackendServiceCheckStorageFormatArgs) Write(oprot thrift.TProtocol) (er
return nil
WriteStructBeginError:
return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
WriteFieldStopError:
return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceCheckStorageFormatArgs) String() string {
+func (p *BackendServicePublishTopicInfoArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("topic_request", thrift.STRUCT, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.TopicRequest.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *BackendServicePublishTopicInfoArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceCheckStorageFormatArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServicePublishTopicInfoArgs(%+v)", *p)
+
}
-func (p *BackendServiceCheckStorageFormatArgs) DeepEqual(ano *BackendServiceCheckStorageFormatArgs) bool {
+func (p *BackendServicePublishTopicInfoArgs) DeepEqual(ano *BackendServicePublishTopicInfoArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
+ if !p.Field1DeepEqual(ano.TopicRequest) {
+ return false
+ }
return true
}
-type BackendServiceCheckStorageFormatResult struct {
- Success *TCheckStorageFormatResult_ `thrift:"success,0,optional" frugal:"0,optional,TCheckStorageFormatResult_" json:"success,omitempty"`
+func (p *BackendServicePublishTopicInfoArgs) Field1DeepEqual(src *TPublishTopicRequest) bool {
+
+ if !p.TopicRequest.DeepEqual(src) {
+ return false
+ }
+ return true
}
-func NewBackendServiceCheckStorageFormatResult() *BackendServiceCheckStorageFormatResult {
- return &BackendServiceCheckStorageFormatResult{}
+type BackendServicePublishTopicInfoResult struct {
+ Success *TPublishTopicResult_ `thrift:"success,0,optional" frugal:"0,optional,TPublishTopicResult_" json:"success,omitempty"`
}
-func (p *BackendServiceCheckStorageFormatResult) InitDefault() {
- *p = BackendServiceCheckStorageFormatResult{}
+func NewBackendServicePublishTopicInfoResult() *BackendServicePublishTopicInfoResult {
+ return &BackendServicePublishTopicInfoResult{}
}
-var BackendServiceCheckStorageFormatResult_Success_DEFAULT *TCheckStorageFormatResult_
+func (p *BackendServicePublishTopicInfoResult) InitDefault() {
+}
-func (p *BackendServiceCheckStorageFormatResult) GetSuccess() (v *TCheckStorageFormatResult_) {
+var BackendServicePublishTopicInfoResult_Success_DEFAULT *TPublishTopicResult_
+
+func (p *BackendServicePublishTopicInfoResult) GetSuccess() (v *TPublishTopicResult_) {
if !p.IsSetSuccess() {
- return BackendServiceCheckStorageFormatResult_Success_DEFAULT
+ return BackendServicePublishTopicInfoResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceCheckStorageFormatResult) SetSuccess(x interface{}) {
- p.Success = x.(*TCheckStorageFormatResult_)
+func (p *BackendServicePublishTopicInfoResult) SetSuccess(x interface{}) {
+ p.Success = x.(*TPublishTopicResult_)
}
-var fieldIDToName_BackendServiceCheckStorageFormatResult = map[int16]string{
+var fieldIDToName_BackendServicePublishTopicInfoResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceCheckStorageFormatResult) IsSetSuccess() bool {
+func (p *BackendServicePublishTopicInfoResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceCheckStorageFormatResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServicePublishTopicInfoResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -14144,17 +25936,14 @@ func (p *BackendServiceCheckStorageFormatResult) Read(iprot thrift.TProtocol) (e
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -14169,7 +25958,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckStorageFormatResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishTopicInfoResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -14179,17 +25968,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceCheckStorageFormatResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = NewTCheckStorageFormatResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServicePublishTopicInfoResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := NewTPublishTopicResult_()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceCheckStorageFormatResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServicePublishTopicInfoResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("check_storage_format_result"); err != nil {
+ if err = oprot.WriteStructBegin("publish_topic_info_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -14197,7 +25987,6 @@ func (p *BackendServiceCheckStorageFormatResult) Write(oprot thrift.TProtocol) (
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -14216,7 +26005,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceCheckStorageFormatResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServicePublishTopicInfoResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -14235,14 +26024,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceCheckStorageFormatResult) String() string {
+func (p *BackendServicePublishTopicInfoResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceCheckStorageFormatResult(%+v)", *p)
+ return fmt.Sprintf("BackendServicePublishTopicInfoResult(%+v)", *p)
+
}
-func (p *BackendServiceCheckStorageFormatResult) DeepEqual(ano *BackendServiceCheckStorageFormatResult) bool {
+func (p *BackendServicePublishTopicInfoResult) DeepEqual(ano *BackendServicePublishTopicInfoResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -14254,7 +26044,7 @@ func (p *BackendServiceCheckStorageFormatResult) DeepEqual(ano *BackendServiceCh
return true
}
-func (p *BackendServiceCheckStorageFormatResult) Field0DeepEqual(src *TCheckStorageFormatResult_) bool {
+func (p *BackendServicePublishTopicInfoResult) Field0DeepEqual(src *TPublishTopicResult_) bool {
if !p.Success.DeepEqual(src) {
return false
@@ -14262,39 +26052,38 @@ func (p *BackendServiceCheckStorageFormatResult) Field0DeepEqual(src *TCheckStor
return true
}
-type BackendServiceIngestBinlogArgs struct {
- IngestBinlogRequest *TIngestBinlogRequest `thrift:"ingest_binlog_request,1" frugal:"1,default,TIngestBinlogRequest" json:"ingest_binlog_request"`
+type BackendServiceGetRealtimeExecStatusArgs struct {
+ Request *TGetRealtimeExecStatusRequest `thrift:"request,1" frugal:"1,default,TGetRealtimeExecStatusRequest" json:"request"`
}
-func NewBackendServiceIngestBinlogArgs() *BackendServiceIngestBinlogArgs {
- return &BackendServiceIngestBinlogArgs{}
+func NewBackendServiceGetRealtimeExecStatusArgs() *BackendServiceGetRealtimeExecStatusArgs {
+ return &BackendServiceGetRealtimeExecStatusArgs{}
}
-func (p *BackendServiceIngestBinlogArgs) InitDefault() {
- *p = BackendServiceIngestBinlogArgs{}
+func (p *BackendServiceGetRealtimeExecStatusArgs) InitDefault() {
}
-var BackendServiceIngestBinlogArgs_IngestBinlogRequest_DEFAULT *TIngestBinlogRequest
+var BackendServiceGetRealtimeExecStatusArgs_Request_DEFAULT *TGetRealtimeExecStatusRequest
-func (p *BackendServiceIngestBinlogArgs) GetIngestBinlogRequest() (v *TIngestBinlogRequest) {
- if !p.IsSetIngestBinlogRequest() {
- return BackendServiceIngestBinlogArgs_IngestBinlogRequest_DEFAULT
+func (p *BackendServiceGetRealtimeExecStatusArgs) GetRequest() (v *TGetRealtimeExecStatusRequest) {
+ if !p.IsSetRequest() {
+ return BackendServiceGetRealtimeExecStatusArgs_Request_DEFAULT
}
- return p.IngestBinlogRequest
+ return p.Request
}
-func (p *BackendServiceIngestBinlogArgs) SetIngestBinlogRequest(val *TIngestBinlogRequest) {
- p.IngestBinlogRequest = val
+func (p *BackendServiceGetRealtimeExecStatusArgs) SetRequest(val *TGetRealtimeExecStatusRequest) {
+ p.Request = val
}
-var fieldIDToName_BackendServiceIngestBinlogArgs = map[int16]string{
- 1: "ingest_binlog_request",
+var fieldIDToName_BackendServiceGetRealtimeExecStatusArgs = map[int16]string{
+ 1: "request",
}
-func (p *BackendServiceIngestBinlogArgs) IsSetIngestBinlogRequest() bool {
- return p.IngestBinlogRequest != nil
+func (p *BackendServiceGetRealtimeExecStatusArgs) IsSetRequest() bool {
+ return p.Request != nil
}
-func (p *BackendServiceIngestBinlogArgs) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetRealtimeExecStatusArgs) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -14318,17 +26107,14 @@ func (p *BackendServiceIngestBinlogArgs) Read(iprot thrift.TProtocol) (err error
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -14343,7 +26129,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogArgs[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetRealtimeExecStatusArgs[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -14353,17 +26139,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceIngestBinlogArgs) ReadField1(iprot thrift.TProtocol) error {
- p.IngestBinlogRequest = NewTIngestBinlogRequest()
- if err := p.IngestBinlogRequest.Read(iprot); err != nil {
+func (p *BackendServiceGetRealtimeExecStatusArgs) ReadField1(iprot thrift.TProtocol) error {
+ _field := NewTGetRealtimeExecStatusRequest()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Request = _field
return nil
}
-func (p *BackendServiceIngestBinlogArgs) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetRealtimeExecStatusArgs) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("ingest_binlog_args"); err != nil {
+ if err = oprot.WriteStructBegin("get_realtime_exec_status_args"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -14371,7 +26158,6 @@ func (p *BackendServiceIngestBinlogArgs) Write(oprot thrift.TProtocol) (err erro
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -14390,11 +26176,11 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceIngestBinlogArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err = oprot.WriteFieldBegin("ingest_binlog_request", thrift.STRUCT, 1); err != nil {
+func (p *BackendServiceGetRealtimeExecStatusArgs) writeField1(oprot thrift.TProtocol) (err error) {
+ if err = oprot.WriteFieldBegin("request", thrift.STRUCT, 1); err != nil {
goto WriteFieldBeginError
}
- if err := p.IngestBinlogRequest.Write(oprot); err != nil {
+ if err := p.Request.Write(oprot); err != nil {
return err
}
if err = oprot.WriteFieldEnd(); err != nil {
@@ -14407,66 +26193,66 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
}
-func (p *BackendServiceIngestBinlogArgs) String() string {
+func (p *BackendServiceGetRealtimeExecStatusArgs) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceIngestBinlogArgs(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetRealtimeExecStatusArgs(%+v)", *p)
+
}
-func (p *BackendServiceIngestBinlogArgs) DeepEqual(ano *BackendServiceIngestBinlogArgs) bool {
+func (p *BackendServiceGetRealtimeExecStatusArgs) DeepEqual(ano *BackendServiceGetRealtimeExecStatusArgs) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
return false
}
- if !p.Field1DeepEqual(ano.IngestBinlogRequest) {
+ if !p.Field1DeepEqual(ano.Request) {
return false
}
return true
}
-func (p *BackendServiceIngestBinlogArgs) Field1DeepEqual(src *TIngestBinlogRequest) bool {
+func (p *BackendServiceGetRealtimeExecStatusArgs) Field1DeepEqual(src *TGetRealtimeExecStatusRequest) bool {
- if !p.IngestBinlogRequest.DeepEqual(src) {
+ if !p.Request.DeepEqual(src) {
return false
}
return true
}
-type BackendServiceIngestBinlogResult struct {
- Success *TIngestBinlogResult_ `thrift:"success,0,optional" frugal:"0,optional,TIngestBinlogResult_" json:"success,omitempty"`
+type BackendServiceGetRealtimeExecStatusResult struct {
+ Success *TGetRealtimeExecStatusResponse `thrift:"success,0,optional" frugal:"0,optional,TGetRealtimeExecStatusResponse" json:"success,omitempty"`
}
-func NewBackendServiceIngestBinlogResult() *BackendServiceIngestBinlogResult {
- return &BackendServiceIngestBinlogResult{}
+func NewBackendServiceGetRealtimeExecStatusResult() *BackendServiceGetRealtimeExecStatusResult {
+ return &BackendServiceGetRealtimeExecStatusResult{}
}
-func (p *BackendServiceIngestBinlogResult) InitDefault() {
- *p = BackendServiceIngestBinlogResult{}
+func (p *BackendServiceGetRealtimeExecStatusResult) InitDefault() {
}
-var BackendServiceIngestBinlogResult_Success_DEFAULT *TIngestBinlogResult_
+var BackendServiceGetRealtimeExecStatusResult_Success_DEFAULT *TGetRealtimeExecStatusResponse
-func (p *BackendServiceIngestBinlogResult) GetSuccess() (v *TIngestBinlogResult_) {
+func (p *BackendServiceGetRealtimeExecStatusResult) GetSuccess() (v *TGetRealtimeExecStatusResponse) {
if !p.IsSetSuccess() {
- return BackendServiceIngestBinlogResult_Success_DEFAULT
+ return BackendServiceGetRealtimeExecStatusResult_Success_DEFAULT
}
return p.Success
}
-func (p *BackendServiceIngestBinlogResult) SetSuccess(x interface{}) {
- p.Success = x.(*TIngestBinlogResult_)
+func (p *BackendServiceGetRealtimeExecStatusResult) SetSuccess(x interface{}) {
+ p.Success = x.(*TGetRealtimeExecStatusResponse)
}
-var fieldIDToName_BackendServiceIngestBinlogResult = map[int16]string{
+var fieldIDToName_BackendServiceGetRealtimeExecStatusResult = map[int16]string{
0: "success",
}
-func (p *BackendServiceIngestBinlogResult) IsSetSuccess() bool {
+func (p *BackendServiceGetRealtimeExecStatusResult) IsSetSuccess() bool {
return p.Success != nil
}
-func (p *BackendServiceIngestBinlogResult) Read(iprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetRealtimeExecStatusResult) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
var fieldId int16
@@ -14490,17 +26276,14 @@ func (p *BackendServiceIngestBinlogResult) Read(iprot thrift.TProtocol) (err err
if err = p.ReadField0(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -14515,7 +26298,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogResult[fieldId]), err)
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetRealtimeExecStatusResult[fieldId]), err)
SkipFieldError:
return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
@@ -14525,17 +26308,18 @@ ReadStructEndError:
return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceIngestBinlogResult) ReadField0(iprot thrift.TProtocol) error {
- p.Success = NewTIngestBinlogResult_()
- if err := p.Success.Read(iprot); err != nil {
+func (p *BackendServiceGetRealtimeExecStatusResult) ReadField0(iprot thrift.TProtocol) error {
+ _field := NewTGetRealtimeExecStatusResponse()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Success = _field
return nil
}
-func (p *BackendServiceIngestBinlogResult) Write(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetRealtimeExecStatusResult) Write(oprot thrift.TProtocol) (err error) {
var fieldId int16
- if err = oprot.WriteStructBegin("ingest_binlog_result"); err != nil {
+ if err = oprot.WriteStructBegin("get_realtime_exec_status_result"); err != nil {
goto WriteStructBeginError
}
if p != nil {
@@ -14543,7 +26327,6 @@ func (p *BackendServiceIngestBinlogResult) Write(oprot thrift.TProtocol) (err er
fieldId = 0
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -14562,7 +26345,7 @@ WriteStructEndError:
return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
}
-func (p *BackendServiceIngestBinlogResult) writeField0(oprot thrift.TProtocol) (err error) {
+func (p *BackendServiceGetRealtimeExecStatusResult) writeField0(oprot thrift.TProtocol) (err error) {
if p.IsSetSuccess() {
if err = oprot.WriteFieldBegin("success", thrift.STRUCT, 0); err != nil {
goto WriteFieldBeginError
@@ -14581,14 +26364,15 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 0 end error: ", p), err)
}
-func (p *BackendServiceIngestBinlogResult) String() string {
+func (p *BackendServiceGetRealtimeExecStatusResult) String() string {
if p == nil {
return ""
}
- return fmt.Sprintf("BackendServiceIngestBinlogResult(%+v)", *p)
+ return fmt.Sprintf("BackendServiceGetRealtimeExecStatusResult(%+v)", *p)
+
}
-func (p *BackendServiceIngestBinlogResult) DeepEqual(ano *BackendServiceIngestBinlogResult) bool {
+func (p *BackendServiceGetRealtimeExecStatusResult) DeepEqual(ano *BackendServiceGetRealtimeExecStatusResult) bool {
if p == ano {
return true
} else if p == nil || ano == nil {
@@ -14600,7 +26384,7 @@ func (p *BackendServiceIngestBinlogResult) DeepEqual(ano *BackendServiceIngestBi
return true
}
-func (p *BackendServiceIngestBinlogResult) Field0DeepEqual(src *TIngestBinlogResult_) bool {
+func (p *BackendServiceGetRealtimeExecStatusResult) Field0DeepEqual(src *TGetRealtimeExecStatusResponse) bool {
if !p.Success.DeepEqual(src) {
return false
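
Each RPC added by this regeneration (publish_topic_info, get_realtime_exec_status, and the others registered further below) gets a matching Args/Result struct pair with the same generated surface: a New* constructor, InitDefault, Get/Set/IsSet accessors, protocol Read/Write, String, and field-wise DeepEqual. A minimal sketch of that accessor surface, assuming only the constructors and methods shown in the hunks above; the empty requests are placeholders, not values taken from this patch:

package main

import (
	"fmt"

	"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/backendservice"
)

func main() {
	// Two args structs carrying empty placeholder requests (illustrative only).
	a := backendservice.NewBackendServiceGetRealtimeExecStatusArgs()
	a.SetRequest(backendservice.NewTGetRealtimeExecStatusRequest())

	b := backendservice.NewBackendServiceGetRealtimeExecStatusArgs()
	b.SetRequest(backendservice.NewTGetRealtimeExecStatusRequest())

	// DeepEqual compares field by field via the generated Field1DeepEqual helper;
	// String gives the usual "%+v"-style dump of the struct.
	fmt.Println(a.DeepEqual(b), a.String())
}
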
diff --git a/pkg/rpc/kitex_gen/backendservice/backendservice/backendservice.go b/pkg/rpc/kitex_gen/backendservice/backendservice/backendservice.go
index 434f0d38..65089a73 100644
--- a/pkg/rpc/kitex_gen/backendservice/backendservice/backendservice.go
+++ b/pkg/rpc/kitex_gen/backendservice/backendservice/backendservice.go
@@ -1,4 +1,4 @@
-// Code generated by Kitex v0.4.4. DO NOT EDIT.
+// Code generated by Kitex v0.8.0. DO NOT EDIT.
package backendservice
@@ -42,19 +42,27 @@ func NewServiceInfo() *kitex.ServiceInfo {
"get_next": kitex.NewMethodInfo(getNextHandler, newBackendServiceGetNextArgs, newBackendServiceGetNextResult, false),
"close_scanner": kitex.NewMethodInfo(closeScannerHandler, newBackendServiceCloseScannerArgs, newBackendServiceCloseScannerResult, false),
"get_stream_load_record": kitex.NewMethodInfo(getStreamLoadRecordHandler, newBackendServiceGetStreamLoadRecordArgs, newBackendServiceGetStreamLoadRecordResult, false),
- "clean_trash": kitex.NewMethodInfo(cleanTrashHandler, newBackendServiceCleanTrashArgs, nil, true),
"check_storage_format": kitex.NewMethodInfo(checkStorageFormatHandler, newBackendServiceCheckStorageFormatArgs, newBackendServiceCheckStorageFormatResult, false),
+ "warm_up_cache_async": kitex.NewMethodInfo(warmUpCacheAsyncHandler, newBackendServiceWarmUpCacheAsyncArgs, newBackendServiceWarmUpCacheAsyncResult, false),
+ "check_warm_up_cache_async": kitex.NewMethodInfo(checkWarmUpCacheAsyncHandler, newBackendServiceCheckWarmUpCacheAsyncArgs, newBackendServiceCheckWarmUpCacheAsyncResult, false),
+ "sync_load_for_tablets": kitex.NewMethodInfo(syncLoadForTabletsHandler, newBackendServiceSyncLoadForTabletsArgs, newBackendServiceSyncLoadForTabletsResult, false),
+ "get_top_n_hot_partitions": kitex.NewMethodInfo(getTopNHotPartitionsHandler, newBackendServiceGetTopNHotPartitionsArgs, newBackendServiceGetTopNHotPartitionsResult, false),
+ "warm_up_tablets": kitex.NewMethodInfo(warmUpTabletsHandler, newBackendServiceWarmUpTabletsArgs, newBackendServiceWarmUpTabletsResult, false),
"ingest_binlog": kitex.NewMethodInfo(ingestBinlogHandler, newBackendServiceIngestBinlogArgs, newBackendServiceIngestBinlogResult, false),
+ "query_ingest_binlog": kitex.NewMethodInfo(queryIngestBinlogHandler, newBackendServiceQueryIngestBinlogArgs, newBackendServiceQueryIngestBinlogResult, false),
+ "publish_topic_info": kitex.NewMethodInfo(publishTopicInfoHandler, newBackendServicePublishTopicInfoArgs, newBackendServicePublishTopicInfoResult, false),
+ "get_realtime_exec_status": kitex.NewMethodInfo(getRealtimeExecStatusHandler, newBackendServiceGetRealtimeExecStatusArgs, newBackendServiceGetRealtimeExecStatusResult, false),
}
extra := map[string]interface{}{
- "PackageName": "backendservice",
+ "PackageName": "backendservice",
+ "ServiceFilePath": `thrift/BackendService.thrift`,
}
svcInfo := &kitex.ServiceInfo{
ServiceName: serviceName,
HandlerType: handlerType,
Methods: methods,
PayloadCodec: kitex.Thrift,
- KiteXGenVersion: "v0.4.4",
+ KiteXGenVersion: "v0.8.0",
Extra: extra,
}
return svcInfo
@@ -384,35 +392,112 @@ func newBackendServiceGetStreamLoadRecordResult() interface{} {
return backendservice.NewBackendServiceGetStreamLoadRecordResult()
}
-func cleanTrashHandler(ctx context.Context, handler interface{}, arg, result interface{}) error {
+func checkStorageFormatHandler(ctx context.Context, handler interface{}, arg, result interface{}) error {
- err := handler.(backendservice.BackendService).CleanTrash(ctx)
+ realResult := result.(*backendservice.BackendServiceCheckStorageFormatResult)
+ success, err := handler.(backendservice.BackendService).CheckStorageFormat(ctx)
if err != nil {
return err
}
+ realResult.Success = success
+ return nil
+}
+func newBackendServiceCheckStorageFormatArgs() interface{} {
+ return backendservice.NewBackendServiceCheckStorageFormatArgs()
+}
+func newBackendServiceCheckStorageFormatResult() interface{} {
+ return backendservice.NewBackendServiceCheckStorageFormatResult()
+}
+
+func warmUpCacheAsyncHandler(ctx context.Context, handler interface{}, arg, result interface{}) error {
+ realArg := arg.(*backendservice.BackendServiceWarmUpCacheAsyncArgs)
+ realResult := result.(*backendservice.BackendServiceWarmUpCacheAsyncResult)
+ success, err := handler.(backendservice.BackendService).WarmUpCacheAsync(ctx, realArg.Request)
+ if err != nil {
+ return err
+ }
+ realResult.Success = success
return nil
}
-func newBackendServiceCleanTrashArgs() interface{} {
- return backendservice.NewBackendServiceCleanTrashArgs()
+func newBackendServiceWarmUpCacheAsyncArgs() interface{} {
+ return backendservice.NewBackendServiceWarmUpCacheAsyncArgs()
}
-func checkStorageFormatHandler(ctx context.Context, handler interface{}, arg, result interface{}) error {
+func newBackendServiceWarmUpCacheAsyncResult() interface{} {
+ return backendservice.NewBackendServiceWarmUpCacheAsyncResult()
+}
- realResult := result.(*backendservice.BackendServiceCheckStorageFormatResult)
- success, err := handler.(backendservice.BackendService).CheckStorageFormat(ctx)
+func checkWarmUpCacheAsyncHandler(ctx context.Context, handler interface{}, arg, result interface{}) error {
+ realArg := arg.(*backendservice.BackendServiceCheckWarmUpCacheAsyncArgs)
+ realResult := result.(*backendservice.BackendServiceCheckWarmUpCacheAsyncResult)
+ success, err := handler.(backendservice.BackendService).CheckWarmUpCacheAsync(ctx, realArg.Request)
if err != nil {
return err
}
realResult.Success = success
return nil
}
-func newBackendServiceCheckStorageFormatArgs() interface{} {
- return backendservice.NewBackendServiceCheckStorageFormatArgs()
+func newBackendServiceCheckWarmUpCacheAsyncArgs() interface{} {
+ return backendservice.NewBackendServiceCheckWarmUpCacheAsyncArgs()
}
-func newBackendServiceCheckStorageFormatResult() interface{} {
- return backendservice.NewBackendServiceCheckStorageFormatResult()
+func newBackendServiceCheckWarmUpCacheAsyncResult() interface{} {
+ return backendservice.NewBackendServiceCheckWarmUpCacheAsyncResult()
+}
+
+func syncLoadForTabletsHandler(ctx context.Context, handler interface{}, arg, result interface{}) error {
+ realArg := arg.(*backendservice.BackendServiceSyncLoadForTabletsArgs)
+ realResult := result.(*backendservice.BackendServiceSyncLoadForTabletsResult)
+ success, err := handler.(backendservice.BackendService).SyncLoadForTablets(ctx, realArg.Request)
+ if err != nil {
+ return err
+ }
+ realResult.Success = success
+ return nil
+}
+func newBackendServiceSyncLoadForTabletsArgs() interface{} {
+ return backendservice.NewBackendServiceSyncLoadForTabletsArgs()
+}
+
+func newBackendServiceSyncLoadForTabletsResult() interface{} {
+ return backendservice.NewBackendServiceSyncLoadForTabletsResult()
+}
+
+func getTopNHotPartitionsHandler(ctx context.Context, handler interface{}, arg, result interface{}) error {
+ realArg := arg.(*backendservice.BackendServiceGetTopNHotPartitionsArgs)
+ realResult := result.(*backendservice.BackendServiceGetTopNHotPartitionsResult)
+ success, err := handler.(backendservice.BackendService).GetTopNHotPartitions(ctx, realArg.Request)
+ if err != nil {
+ return err
+ }
+ realResult.Success = success
+ return nil
+}
+func newBackendServiceGetTopNHotPartitionsArgs() interface{} {
+ return backendservice.NewBackendServiceGetTopNHotPartitionsArgs()
+}
+
+func newBackendServiceGetTopNHotPartitionsResult() interface{} {
+ return backendservice.NewBackendServiceGetTopNHotPartitionsResult()
+}
+
+func warmUpTabletsHandler(ctx context.Context, handler interface{}, arg, result interface{}) error {
+ realArg := arg.(*backendservice.BackendServiceWarmUpTabletsArgs)
+ realResult := result.(*backendservice.BackendServiceWarmUpTabletsResult)
+ success, err := handler.(backendservice.BackendService).WarmUpTablets(ctx, realArg.Request)
+ if err != nil {
+ return err
+ }
+ realResult.Success = success
+ return nil
+}
+func newBackendServiceWarmUpTabletsArgs() interface{} {
+ return backendservice.NewBackendServiceWarmUpTabletsArgs()
+}
+
+func newBackendServiceWarmUpTabletsResult() interface{} {
+ return backendservice.NewBackendServiceWarmUpTabletsResult()
}
func ingestBinlogHandler(ctx context.Context, handler interface{}, arg, result interface{}) error {
@@ -433,6 +518,60 @@ func newBackendServiceIngestBinlogResult() interface{} {
return backendservice.NewBackendServiceIngestBinlogResult()
}
+func queryIngestBinlogHandler(ctx context.Context, handler interface{}, arg, result interface{}) error {
+ realArg := arg.(*backendservice.BackendServiceQueryIngestBinlogArgs)
+ realResult := result.(*backendservice.BackendServiceQueryIngestBinlogResult)
+ success, err := handler.(backendservice.BackendService).QueryIngestBinlog(ctx, realArg.QueryIngestBinlogRequest)
+ if err != nil {
+ return err
+ }
+ realResult.Success = success
+ return nil
+}
+func newBackendServiceQueryIngestBinlogArgs() interface{} {
+ return backendservice.NewBackendServiceQueryIngestBinlogArgs()
+}
+
+func newBackendServiceQueryIngestBinlogResult() interface{} {
+ return backendservice.NewBackendServiceQueryIngestBinlogResult()
+}
+
+func publishTopicInfoHandler(ctx context.Context, handler interface{}, arg, result interface{}) error {
+ realArg := arg.(*backendservice.BackendServicePublishTopicInfoArgs)
+ realResult := result.(*backendservice.BackendServicePublishTopicInfoResult)
+ success, err := handler.(backendservice.BackendService).PublishTopicInfo(ctx, realArg.TopicRequest)
+ if err != nil {
+ return err
+ }
+ realResult.Success = success
+ return nil
+}
+func newBackendServicePublishTopicInfoArgs() interface{} {
+ return backendservice.NewBackendServicePublishTopicInfoArgs()
+}
+
+func newBackendServicePublishTopicInfoResult() interface{} {
+ return backendservice.NewBackendServicePublishTopicInfoResult()
+}
+
+func getRealtimeExecStatusHandler(ctx context.Context, handler interface{}, arg, result interface{}) error {
+ realArg := arg.(*backendservice.BackendServiceGetRealtimeExecStatusArgs)
+ realResult := result.(*backendservice.BackendServiceGetRealtimeExecStatusResult)
+ success, err := handler.(backendservice.BackendService).GetRealtimeExecStatus(ctx, realArg.Request)
+ if err != nil {
+ return err
+ }
+ realResult.Success = success
+ return nil
+}
+func newBackendServiceGetRealtimeExecStatusArgs() interface{} {
+ return backendservice.NewBackendServiceGetRealtimeExecStatusArgs()
+}
+
+func newBackendServiceGetRealtimeExecStatusResult() interface{} {
+ return backendservice.NewBackendServiceGetRealtimeExecStatusResult()
+}
+
type kClient struct {
c client.Client
}
@@ -620,14 +759,6 @@ func (p *kClient) GetStreamLoadRecord(ctx context.Context, lastStreamRecordTime
return _result.GetSuccess(), nil
}
-func (p *kClient) CleanTrash(ctx context.Context) (err error) {
- var _args backendservice.BackendServiceCleanTrashArgs
- if err = p.c.Call(ctx, "clean_trash", &_args, nil); err != nil {
- return
- }
- return nil
-}
-
func (p *kClient) CheckStorageFormat(ctx context.Context) (r *backendservice.TCheckStorageFormatResult_, err error) {
var _args backendservice.BackendServiceCheckStorageFormatArgs
var _result backendservice.BackendServiceCheckStorageFormatResult
@@ -637,6 +768,56 @@ func (p *kClient) CheckStorageFormat(ctx context.Context) (r *backendservice.TCh
return _result.GetSuccess(), nil
}
+func (p *kClient) WarmUpCacheAsync(ctx context.Context, request *backendservice.TWarmUpCacheAsyncRequest) (r *backendservice.TWarmUpCacheAsyncResponse, err error) {
+ var _args backendservice.BackendServiceWarmUpCacheAsyncArgs
+ _args.Request = request
+ var _result backendservice.BackendServiceWarmUpCacheAsyncResult
+ if err = p.c.Call(ctx, "warm_up_cache_async", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+
+func (p *kClient) CheckWarmUpCacheAsync(ctx context.Context, request *backendservice.TCheckWarmUpCacheAsyncRequest) (r *backendservice.TCheckWarmUpCacheAsyncResponse, err error) {
+ var _args backendservice.BackendServiceCheckWarmUpCacheAsyncArgs
+ _args.Request = request
+ var _result backendservice.BackendServiceCheckWarmUpCacheAsyncResult
+ if err = p.c.Call(ctx, "check_warm_up_cache_async", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+
+func (p *kClient) SyncLoadForTablets(ctx context.Context, request *backendservice.TSyncLoadForTabletsRequest) (r *backendservice.TSyncLoadForTabletsResponse, err error) {
+ var _args backendservice.BackendServiceSyncLoadForTabletsArgs
+ _args.Request = request
+ var _result backendservice.BackendServiceSyncLoadForTabletsResult
+ if err = p.c.Call(ctx, "sync_load_for_tablets", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+
+func (p *kClient) GetTopNHotPartitions(ctx context.Context, request *backendservice.TGetTopNHotPartitionsRequest) (r *backendservice.TGetTopNHotPartitionsResponse, err error) {
+ var _args backendservice.BackendServiceGetTopNHotPartitionsArgs
+ _args.Request = request
+ var _result backendservice.BackendServiceGetTopNHotPartitionsResult
+ if err = p.c.Call(ctx, "get_top_n_hot_partitions", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+
+func (p *kClient) WarmUpTablets(ctx context.Context, request *backendservice.TWarmUpTabletsRequest) (r *backendservice.TWarmUpTabletsResponse, err error) {
+ var _args backendservice.BackendServiceWarmUpTabletsArgs
+ _args.Request = request
+ var _result backendservice.BackendServiceWarmUpTabletsResult
+ if err = p.c.Call(ctx, "warm_up_tablets", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+
func (p *kClient) IngestBinlog(ctx context.Context, ingestBinlogRequest *backendservice.TIngestBinlogRequest) (r *backendservice.TIngestBinlogResult_, err error) {
var _args backendservice.BackendServiceIngestBinlogArgs
_args.IngestBinlogRequest = ingestBinlogRequest
@@ -646,3 +827,33 @@ func (p *kClient) IngestBinlog(ctx context.Context, ingestBinlogRequest *backend
}
return _result.GetSuccess(), nil
}
+
+func (p *kClient) QueryIngestBinlog(ctx context.Context, queryIngestBinlogRequest *backendservice.TQueryIngestBinlogRequest) (r *backendservice.TQueryIngestBinlogResult_, err error) {
+ var _args backendservice.BackendServiceQueryIngestBinlogArgs
+ _args.QueryIngestBinlogRequest = queryIngestBinlogRequest
+ var _result backendservice.BackendServiceQueryIngestBinlogResult
+ if err = p.c.Call(ctx, "query_ingest_binlog", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+
+func (p *kClient) PublishTopicInfo(ctx context.Context, topicRequest *backendservice.TPublishTopicRequest) (r *backendservice.TPublishTopicResult_, err error) {
+ var _args backendservice.BackendServicePublishTopicInfoArgs
+ _args.TopicRequest = topicRequest
+ var _result backendservice.BackendServicePublishTopicInfoResult
+ if err = p.c.Call(ctx, "publish_topic_info", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
+
+func (p *kClient) GetRealtimeExecStatus(ctx context.Context, request *backendservice.TGetRealtimeExecStatusRequest) (r *backendservice.TGetRealtimeExecStatusResponse, err error) {
+ var _args backendservice.BackendServiceGetRealtimeExecStatusArgs
+ _args.Request = request
+ var _result backendservice.BackendServiceGetRealtimeExecStatusResult
+ if err = p.c.Call(ctx, "get_realtime_exec_status", &_args, &_result); err != nil {
+ return
+ }
+ return _result.GetSuccess(), nil
+}
diff --git a/pkg/rpc/kitex_gen/backendservice/backendservice/client.go b/pkg/rpc/kitex_gen/backendservice/backendservice/client.go
index 1481b215..e001d2bd 100644
--- a/pkg/rpc/kitex_gen/backendservice/backendservice/client.go
+++ b/pkg/rpc/kitex_gen/backendservice/backendservice/client.go
@@ -1,4 +1,4 @@
-// Code generated by Kitex v0.4.4. DO NOT EDIT.
+// Code generated by Kitex v0.8.0. DO NOT EDIT.
package backendservice
@@ -34,9 +34,16 @@ type Client interface {
GetNext(ctx context.Context, params *dorisexternalservice.TScanNextBatchParams, callOptions ...callopt.Option) (r *dorisexternalservice.TScanBatchResult_, err error)
CloseScanner(ctx context.Context, params *dorisexternalservice.TScanCloseParams, callOptions ...callopt.Option) (r *dorisexternalservice.TScanCloseResult_, err error)
GetStreamLoadRecord(ctx context.Context, lastStreamRecordTime int64, callOptions ...callopt.Option) (r *backendservice.TStreamLoadRecordResult_, err error)
- CleanTrash(ctx context.Context, callOptions ...callopt.Option) (err error)
CheckStorageFormat(ctx context.Context, callOptions ...callopt.Option) (r *backendservice.TCheckStorageFormatResult_, err error)
+ WarmUpCacheAsync(ctx context.Context, request *backendservice.TWarmUpCacheAsyncRequest, callOptions ...callopt.Option) (r *backendservice.TWarmUpCacheAsyncResponse, err error)
+ CheckWarmUpCacheAsync(ctx context.Context, request *backendservice.TCheckWarmUpCacheAsyncRequest, callOptions ...callopt.Option) (r *backendservice.TCheckWarmUpCacheAsyncResponse, err error)
+ SyncLoadForTablets(ctx context.Context, request *backendservice.TSyncLoadForTabletsRequest, callOptions ...callopt.Option) (r *backendservice.TSyncLoadForTabletsResponse, err error)
+ GetTopNHotPartitions(ctx context.Context, request *backendservice.TGetTopNHotPartitionsRequest, callOptions ...callopt.Option) (r *backendservice.TGetTopNHotPartitionsResponse, err error)
+ WarmUpTablets(ctx context.Context, request *backendservice.TWarmUpTabletsRequest, callOptions ...callopt.Option) (r *backendservice.TWarmUpTabletsResponse, err error)
IngestBinlog(ctx context.Context, ingestBinlogRequest *backendservice.TIngestBinlogRequest, callOptions ...callopt.Option) (r *backendservice.TIngestBinlogResult_, err error)
+ QueryIngestBinlog(ctx context.Context, queryIngestBinlogRequest *backendservice.TQueryIngestBinlogRequest, callOptions ...callopt.Option) (r *backendservice.TQueryIngestBinlogResult_, err error)
+ PublishTopicInfo(ctx context.Context, topicRequest *backendservice.TPublishTopicRequest, callOptions ...callopt.Option) (r *backendservice.TPublishTopicResult_, err error)
+ GetRealtimeExecStatus(ctx context.Context, request *backendservice.TGetRealtimeExecStatusRequest, callOptions ...callopt.Option) (r *backendservice.TGetRealtimeExecStatusResponse, err error)
}
// NewClient creates a client for the service defined in IDL.
@@ -158,17 +165,52 @@ func (p *kBackendServiceClient) GetStreamLoadRecord(ctx context.Context, lastStr
return p.kClient.GetStreamLoadRecord(ctx, lastStreamRecordTime)
}
-func (p *kBackendServiceClient) CleanTrash(ctx context.Context, callOptions ...callopt.Option) (err error) {
+func (p *kBackendServiceClient) CheckStorageFormat(ctx context.Context, callOptions ...callopt.Option) (r *backendservice.TCheckStorageFormatResult_, err error) {
ctx = client.NewCtxWithCallOptions(ctx, callOptions)
- return p.kClient.CleanTrash(ctx)
+ return p.kClient.CheckStorageFormat(ctx)
}
-func (p *kBackendServiceClient) CheckStorageFormat(ctx context.Context, callOptions ...callopt.Option) (r *backendservice.TCheckStorageFormatResult_, err error) {
+func (p *kBackendServiceClient) WarmUpCacheAsync(ctx context.Context, request *backendservice.TWarmUpCacheAsyncRequest, callOptions ...callopt.Option) (r *backendservice.TWarmUpCacheAsyncResponse, err error) {
ctx = client.NewCtxWithCallOptions(ctx, callOptions)
- return p.kClient.CheckStorageFormat(ctx)
+ return p.kClient.WarmUpCacheAsync(ctx, request)
+}
+
+func (p *kBackendServiceClient) CheckWarmUpCacheAsync(ctx context.Context, request *backendservice.TCheckWarmUpCacheAsyncRequest, callOptions ...callopt.Option) (r *backendservice.TCheckWarmUpCacheAsyncResponse, err error) {
+ ctx = client.NewCtxWithCallOptions(ctx, callOptions)
+ return p.kClient.CheckWarmUpCacheAsync(ctx, request)
+}
+
+func (p *kBackendServiceClient) SyncLoadForTablets(ctx context.Context, request *backendservice.TSyncLoadForTabletsRequest, callOptions ...callopt.Option) (r *backendservice.TSyncLoadForTabletsResponse, err error) {
+ ctx = client.NewCtxWithCallOptions(ctx, callOptions)
+ return p.kClient.SyncLoadForTablets(ctx, request)
+}
+
+func (p *kBackendServiceClient) GetTopNHotPartitions(ctx context.Context, request *backendservice.TGetTopNHotPartitionsRequest, callOptions ...callopt.Option) (r *backendservice.TGetTopNHotPartitionsResponse, err error) {
+ ctx = client.NewCtxWithCallOptions(ctx, callOptions)
+ return p.kClient.GetTopNHotPartitions(ctx, request)
+}
+
+func (p *kBackendServiceClient) WarmUpTablets(ctx context.Context, request *backendservice.TWarmUpTabletsRequest, callOptions ...callopt.Option) (r *backendservice.TWarmUpTabletsResponse, err error) {
+ ctx = client.NewCtxWithCallOptions(ctx, callOptions)
+ return p.kClient.WarmUpTablets(ctx, request)
}
func (p *kBackendServiceClient) IngestBinlog(ctx context.Context, ingestBinlogRequest *backendservice.TIngestBinlogRequest, callOptions ...callopt.Option) (r *backendservice.TIngestBinlogResult_, err error) {
ctx = client.NewCtxWithCallOptions(ctx, callOptions)
return p.kClient.IngestBinlog(ctx, ingestBinlogRequest)
}
+
+func (p *kBackendServiceClient) QueryIngestBinlog(ctx context.Context, queryIngestBinlogRequest *backendservice.TQueryIngestBinlogRequest, callOptions ...callopt.Option) (r *backendservice.TQueryIngestBinlogResult_, err error) {
+ ctx = client.NewCtxWithCallOptions(ctx, callOptions)
+ return p.kClient.QueryIngestBinlog(ctx, queryIngestBinlogRequest)
+}
+
+func (p *kBackendServiceClient) PublishTopicInfo(ctx context.Context, topicRequest *backendservice.TPublishTopicRequest, callOptions ...callopt.Option) (r *backendservice.TPublishTopicResult_, err error) {
+ ctx = client.NewCtxWithCallOptions(ctx, callOptions)
+ return p.kClient.PublishTopicInfo(ctx, topicRequest)
+}
+
+func (p *kBackendServiceClient) GetRealtimeExecStatus(ctx context.Context, request *backendservice.TGetRealtimeExecStatusRequest, callOptions ...callopt.Option) (r *backendservice.TGetRealtimeExecStatusResponse, err error) {
+ ctx = client.NewCtxWithCallOptions(ctx, callOptions)
+ return p.kClient.GetRealtimeExecStatus(ctx, request)
+}
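
The regenerated Client interface now exposes WarmUpCacheAsync, CheckWarmUpCacheAsync, SyncLoadForTablets, GetTopNHotPartitions, WarmUpTablets, QueryIngestBinlog, PublishTopicInfo, and GetRealtimeExecStatus, and no longer offers CleanTrash. A minimal caller sketch, assuming the standard Kitex-generated NewClient constructor in this package and Kitex's client.WithHostPorts option; the service name, address, and empty request below are illustrative placeholders, not values from this patch:

package main

import (
	"context"
	"log"

	"github.com/cloudwego/kitex/client"

	"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/backendservice"
	backendsvc "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/backendservice/backendservice"
)

func main() {
	// NewClient is the usual Kitex-generated constructor for this service package.
	cli, err := backendsvc.NewClient("doris-be", client.WithHostPorts("127.0.0.1:9060"))
	if err != nil {
		log.Fatalf("new backend service client: %v", err)
	}

	// publish_topic_info is one of the RPCs added by this regeneration; the
	// request type comes from the regenerated backendservice types package.
	req := backendservice.NewTPublishTopicRequest()
	res, err := cli.PublishTopicInfo(context.Background(), req)
	if err != nil {
		log.Fatalf("publish_topic_info: %v", err)
	}
	log.Printf("publish_topic_info result: %v", res)
}
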
diff --git a/pkg/rpc/kitex_gen/backendservice/backendservice/invoker.go b/pkg/rpc/kitex_gen/backendservice/backendservice/invoker.go
index bc7108dc..e38cd4f8 100644
--- a/pkg/rpc/kitex_gen/backendservice/backendservice/invoker.go
+++ b/pkg/rpc/kitex_gen/backendservice/backendservice/invoker.go
@@ -1,4 +1,4 @@
-// Code generated by Kitex v0.4.4. DO NOT EDIT.
+// Code generated by Kitex v0.8.0. DO NOT EDIT.
package backendservice
diff --git a/pkg/rpc/kitex_gen/backendservice/backendservice/server.go b/pkg/rpc/kitex_gen/backendservice/backendservice/server.go
index 228b2335..c10bd073 100644
--- a/pkg/rpc/kitex_gen/backendservice/backendservice/server.go
+++ b/pkg/rpc/kitex_gen/backendservice/backendservice/server.go
@@ -1,4 +1,4 @@
-// Code generated by Kitex v0.4.4. DO NOT EDIT.
+// Code generated by Kitex v0.8.0. DO NOT EDIT.
package backendservice
import (
diff --git a/pkg/rpc/kitex_gen/backendservice/k-BackendService.go b/pkg/rpc/kitex_gen/backendservice/k-BackendService.go
index 3029f170..08b0b5d9 100644
--- a/pkg/rpc/kitex_gen/backendservice/k-BackendService.go
+++ b/pkg/rpc/kitex_gen/backendservice/k-BackendService.go
@@ -1,4 +1,4 @@
-// Code generated by Kitex v0.4.4. DO NOT EDIT.
+// Code generated by Kitex v0.8.0. DO NOT EDIT.
package backendservice
@@ -11,8 +11,10 @@ import (
"github.com/apache/thrift/lib/go/thrift"
"github.com/cloudwego/kitex/pkg/protocol/bthrift"
+
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/agentservice"
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/dorisexternalservice"
+ "github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/frontendservice"
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/palointernalservice"
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/plannodes"
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/status"
@@ -29,6 +31,7 @@ var (
_ = bthrift.BinaryWriter(nil)
_ = agentservice.KitexUnusedProtection
_ = dorisexternalservice.KitexUnusedProtection
+ _ = frontendservice.KitexUnusedProtection
_ = palointernalservice.KitexUnusedProtection
_ = plannodes.KitexUnusedProtection
_ = status.KitexUnusedProtection
@@ -264,6 +267,34 @@ func (p *TTabletStat) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
+ case 6:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField6(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 7:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField7(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
default:
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -339,7 +370,7 @@ func (p *TTabletStat) FastReadField3(buf []byte) (int, error) {
return offset, err
} else {
offset += l
- p.RowNum = &v
+ p.RowCount = &v
}
return offset, nil
@@ -352,7 +383,7 @@ func (p *TTabletStat) FastReadField4(buf []byte) (int, error) {
return offset, err
} else {
offset += l
- p.VersionCount = &v
+ p.TotalVersionCount = &v
}
return offset, nil
@@ -371,6 +402,32 @@ func (p *TTabletStat) FastReadField5(buf []byte) (int, error) {
return offset, nil
}
+func (p *TTabletStat) FastReadField6(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.VisibleVersionCount = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TTabletStat) FastReadField7(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.VisibleVersion = &v
+
+ }
+ return offset, nil
+}
+
// for compatibility
func (p *TTabletStat) FastWrite(buf []byte) int {
return 0
@@ -385,6 +442,8 @@ func (p *TTabletStat) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWri
offset += p.fastWriteField3(buf[offset:], binaryWriter)
offset += p.fastWriteField4(buf[offset:], binaryWriter)
offset += p.fastWriteField5(buf[offset:], binaryWriter)
+ offset += p.fastWriteField6(buf[offset:], binaryWriter)
+ offset += p.fastWriteField7(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
@@ -400,6 +459,8 @@ func (p *TTabletStat) BLength() int {
l += p.field3Length()
l += p.field4Length()
l += p.field5Length()
+ l += p.field6Length()
+ l += p.field7Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
@@ -428,9 +489,9 @@ func (p *TTabletStat) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWri
func (p *TTabletStat) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- if p.IsSetRowNum() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "row_num", thrift.I64, 3)
- offset += bthrift.Binary.WriteI64(buf[offset:], *p.RowNum)
+ if p.IsSetRowCount() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "row_count", thrift.I64, 3)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.RowCount)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
}
@@ -439,9 +500,9 @@ func (p *TTabletStat) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWri
func (p *TTabletStat) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- if p.IsSetVersionCount() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version_count", thrift.I64, 4)
- offset += bthrift.Binary.WriteI64(buf[offset:], *p.VersionCount)
+ if p.IsSetTotalVersionCount() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "total_version_count", thrift.I64, 4)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.TotalVersionCount)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
}
@@ -459,6 +520,28 @@ func (p *TTabletStat) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWri
return offset
}
+func (p *TTabletStat) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetVisibleVersionCount() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "visible_version_count", thrift.I64, 6)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.VisibleVersionCount)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TTabletStat) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetVisibleVersion() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "visible_version", thrift.I64, 7)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.VisibleVersion)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
func (p *TTabletStat) field1Length() int {
l := 0
l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 1)
@@ -481,9 +564,9 @@ func (p *TTabletStat) field2Length() int {
func (p *TTabletStat) field3Length() int {
l := 0
- if p.IsSetRowNum() {
- l += bthrift.Binary.FieldBeginLength("row_num", thrift.I64, 3)
- l += bthrift.Binary.I64Length(*p.RowNum)
+ if p.IsSetRowCount() {
+ l += bthrift.Binary.FieldBeginLength("row_count", thrift.I64, 3)
+ l += bthrift.Binary.I64Length(*p.RowCount)
l += bthrift.Binary.FieldEndLength()
}
@@ -492,9 +575,9 @@ func (p *TTabletStat) field3Length() int {
func (p *TTabletStat) field4Length() int {
l := 0
- if p.IsSetVersionCount() {
- l += bthrift.Binary.FieldBeginLength("version_count", thrift.I64, 4)
- l += bthrift.Binary.I64Length(*p.VersionCount)
+ if p.IsSetTotalVersionCount() {
+ l += bthrift.Binary.FieldBeginLength("total_version_count", thrift.I64, 4)
+ l += bthrift.Binary.I64Length(*p.TotalVersionCount)
l += bthrift.Binary.FieldEndLength()
}
@@ -512,6 +595,28 @@ func (p *TTabletStat) field5Length() int {
return l
}
+func (p *TTabletStat) field6Length() int {
+ l := 0
+ if p.IsSetVisibleVersionCount() {
+ l += bthrift.Binary.FieldBeginLength("visible_version_count", thrift.I64, 6)
+ l += bthrift.Binary.I64Length(*p.VisibleVersionCount)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TTabletStat) field7Length() int {
+ l := 0
+ if p.IsSetVisibleVersion() {
+ l += bthrift.Binary.FieldBeginLength("visible_version", thrift.I64, 7)
+ l += bthrift.Binary.I64Length(*p.VisibleVersion)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
func (p *TTabletStatResult_) FastRead(buf []byte) (int, error) {
var err error
var offset int
@@ -1403,6 +1508,48 @@ func (p *TRoutineLoadTask) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
+ case 17:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField17(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 18:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField18(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 19:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField19(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
default:
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -1678,6 +1825,45 @@ func (p *TRoutineLoadTask) FastReadField16(buf []byte) (int, error) {
return offset, nil
}
+func (p *TRoutineLoadTask) FastReadField17(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.MemtableOnSinkNode = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TRoutineLoadTask) FastReadField18(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.QualifiedUser = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TRoutineLoadTask) FastReadField19(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.CloudCluster = &v
+
+ }
+ return offset, nil
+}
+
// for compatibility
func (p *TRoutineLoadTask) FastWrite(buf []byte) int {
return 0
@@ -1694,6 +1880,7 @@ func (p *TRoutineLoadTask) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina
offset += p.fastWriteField10(buf[offset:], binaryWriter)
offset += p.fastWriteField11(buf[offset:], binaryWriter)
offset += p.fastWriteField16(buf[offset:], binaryWriter)
+ offset += p.fastWriteField17(buf[offset:], binaryWriter)
offset += p.fastWriteField1(buf[offset:], binaryWriter)
offset += p.fastWriteField3(buf[offset:], binaryWriter)
offset += p.fastWriteField6(buf[offset:], binaryWriter)
@@ -1703,6 +1890,8 @@ func (p *TRoutineLoadTask) FastWriteNocopy(buf []byte, binaryWriter bthrift.Bina
offset += p.fastWriteField13(buf[offset:], binaryWriter)
offset += p.fastWriteField14(buf[offset:], binaryWriter)
offset += p.fastWriteField15(buf[offset:], binaryWriter)
+ offset += p.fastWriteField18(buf[offset:], binaryWriter)
+ offset += p.fastWriteField19(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
@@ -1729,6 +1918,9 @@ func (p *TRoutineLoadTask) BLength() int {
l += p.field14Length()
l += p.field15Length()
l += p.field16Length()
+ l += p.field17Length()
+ l += p.field18Length()
+ l += p.field19Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
@@ -1897,6 +2089,39 @@ func (p *TRoutineLoadTask) fastWriteField16(buf []byte, binaryWriter bthrift.Bin
return offset
}
+func (p *TRoutineLoadTask) fastWriteField17(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetMemtableOnSinkNode() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "memtable_on_sink_node", thrift.BOOL, 17)
+ offset += bthrift.Binary.WriteBool(buf[offset:], *p.MemtableOnSinkNode)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TRoutineLoadTask) fastWriteField18(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetQualifiedUser() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "qualified_user", thrift.STRING, 18)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.QualifiedUser)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TRoutineLoadTask) fastWriteField19(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetCloudCluster() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cloud_cluster", thrift.STRING, 19)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.CloudCluster)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
func (p *TRoutineLoadTask) field1Length() int {
l := 0
l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 1)
@@ -2059,6 +2284,39 @@ func (p *TRoutineLoadTask) field16Length() int {
return l
}
+func (p *TRoutineLoadTask) field17Length() int {
+ l := 0
+ if p.IsSetMemtableOnSinkNode() {
+ l += bthrift.Binary.FieldBeginLength("memtable_on_sink_node", thrift.BOOL, 17)
+ l += bthrift.Binary.BoolLength(*p.MemtableOnSinkNode)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TRoutineLoadTask) field18Length() int {
+ l := 0
+ if p.IsSetQualifiedUser() {
+ l += bthrift.Binary.FieldBeginLength("qualified_user", thrift.STRING, 18)
+ l += bthrift.Binary.StringLengthNocopy(*p.QualifiedUser)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TRoutineLoadTask) field19Length() int {
+ l := 0
+ if p.IsSetCloudCluster() {
+ l += bthrift.Binary.FieldBeginLength("cloud_cluster", thrift.STRING, 19)
+ l += bthrift.Binary.StringLengthNocopy(*p.CloudCluster)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
func (p *TKafkaMetaProxyRequest) FastRead(buf []byte) (int, error) {
var err error
var offset int
@@ -4435,12 +4693,15 @@ func (p *TCheckStorageFormatResult_) field2Length() int {
return l
}
-func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) {
+func (p *TWarmUpCacheAsyncRequest) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
var fieldTypeId thrift.TType
var fieldId int16
+ var issetHost bool = false
+ var issetBrpcPort bool = false
+ var issetTabletIds bool = false
_, l, err = bthrift.Binary.ReadStructBegin(buf)
offset += l
if err != nil {
@@ -4458,12 +4719,13 @@ func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) {
}
switch fieldId {
case 1:
- if fieldTypeId == thrift.I64 {
+ if fieldTypeId == thrift.STRING {
l, err = p.FastReadField1(buf[offset:])
offset += l
if err != nil {
goto ReadFieldError
}
+ issetHost = true
} else {
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -4472,12 +4734,13 @@ func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) {
}
}
case 2:
- if fieldTypeId == thrift.I64 {
+ if fieldTypeId == thrift.I32 {
l, err = p.FastReadField2(buf[offset:])
offset += l
if err != nil {
goto ReadFieldError
}
+ issetBrpcPort = true
} else {
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -4486,12 +4749,13 @@ func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) {
}
}
case 3:
- if fieldTypeId == thrift.I64 {
+ if fieldTypeId == thrift.LIST {
l, err = p.FastReadField3(buf[offset:])
offset += l
if err != nil {
goto ReadFieldError
}
+ issetTabletIds = true
} else {
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -4499,81 +4763,11 @@ func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
- case 4:
- if fieldTypeId == thrift.STRING {
- l, err = p.FastReadField4(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldError
- }
- } else {
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
- }
- case 5:
- if fieldTypeId == thrift.STRING {
- l, err = p.FastReadField5(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldError
- }
- } else {
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
- }
- case 6:
- if fieldTypeId == thrift.I64 {
- l, err = p.FastReadField6(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldError
- }
- } else {
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
- }
- case 7:
- if fieldTypeId == thrift.I64 {
- l, err = p.FastReadField7(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldError
- }
- } else {
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
- }
- case 8:
- if fieldTypeId == thrift.STRUCT {
- l, err = p.FastReadField8(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldError
- }
- } else {
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
- }
- default:
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
}
}
@@ -4589,341 +4783,8607 @@ func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) {
goto ReadStructEndError
}
+ if !issetHost {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetBrpcPort {
+ fieldId = 2
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetTabletIds {
+ fieldId = 3
+ goto RequiredFieldNotSetError
+ }
return offset, nil
ReadStructBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogRequest[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpCacheAsyncRequest[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpCacheAsyncRequest[fieldId]))
}
-func (p *TIngestBinlogRequest) FastReadField1(buf []byte) (int, error) {
- offset := 0
-
- if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
- return offset, err
- } else {
- offset += l
- p.TxnId = &v
-
- }
- return offset, nil
-}
-
-func (p *TIngestBinlogRequest) FastReadField2(buf []byte) (int, error) {
+func (p *TWarmUpCacheAsyncRequest) FastReadField1(buf []byte) (int, error) {
offset := 0
- if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
- p.RemoteTabletId = &v
-
- }
- return offset, nil
-}
-
-func (p *TIngestBinlogRequest) FastReadField3(buf []byte) (int, error) {
- offset := 0
- if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
- return offset, err
- } else {
- offset += l
- p.BinlogVersion = &v
+ p.Host = v
}
return offset, nil
}
-func (p *TIngestBinlogRequest) FastReadField4(buf []byte) (int, error) {
+func (p *TWarmUpCacheAsyncRequest) FastReadField2(buf []byte) (int, error) {
offset := 0
- if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
- p.RemoteHost = &v
-
- }
- return offset, nil
-}
-
-func (p *TIngestBinlogRequest) FastReadField5(buf []byte) (int, error) {
- offset := 0
- if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
- return offset, err
- } else {
- offset += l
- p.RemotePort = &v
+ p.BrpcPort = v
}
return offset, nil
}
-func (p *TIngestBinlogRequest) FastReadField6(buf []byte) (int, error) {
+func (p *TWarmUpCacheAsyncRequest) FastReadField3(buf []byte) (int, error) {
offset := 0
- if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
return offset, err
- } else {
- offset += l
- p.PartitionId = &v
-
}
- return offset, nil
-}
+ p.TabletIds = make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+ var _elem int64
-func (p *TIngestBinlogRequest) FastReadField7(buf []byte) (int, error) {
-    offset := 0
-    if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
-        return offset, err
-    } else {
-        offset += l
-        p.LocalTabletId = &v
+        if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+            return offset, err
+        } else {
+            offset += l
+            _elem = v
+        }
+        p.TabletIds = append(p.TabletIds, _elem)
}
- return offset, nil
-}
-
-func (p *TIngestBinlogRequest) FastReadField8(buf []byte) (int, error) {
- offset := 0
-
- tmp := types.NewTUniqueId()
- if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
- p.LoadId = tmp
return offset, nil
}
// for compatibility
-func (p *TIngestBinlogRequest) FastWrite(buf []byte) int {
+func (p *TWarmUpCacheAsyncRequest) FastWrite(buf []byte) int {
return 0
}
-func (p *TIngestBinlogRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TWarmUpCacheAsyncRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIngestBinlogRequest")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWarmUpCacheAsyncRequest")
if p != nil {
- offset += p.fastWriteField1(buf[offset:], binaryWriter)
offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
offset += p.fastWriteField3(buf[offset:], binaryWriter)
- offset += p.fastWriteField6(buf[offset:], binaryWriter)
- offset += p.fastWriteField7(buf[offset:], binaryWriter)
- offset += p.fastWriteField4(buf[offset:], binaryWriter)
- offset += p.fastWriteField5(buf[offset:], binaryWriter)
- offset += p.fastWriteField8(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
return offset
}
-func (p *TIngestBinlogRequest) BLength() int {
+func (p *TWarmUpCacheAsyncRequest) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("TIngestBinlogRequest")
+ l += bthrift.Binary.StructBeginLength("TWarmUpCacheAsyncRequest")
if p != nil {
l += p.field1Length()
l += p.field2Length()
l += p.field3Length()
- l += p.field4Length()
- l += p.field5Length()
- l += p.field6Length()
- l += p.field7Length()
- l += p.field8Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
return l
}
-func (p *TIngestBinlogRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
- offset := 0
- if p.IsSetTxnId() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 1)
- offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId)
-
- offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
- }
- return offset
-}
-
-func (p *TIngestBinlogRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
- offset := 0
- if p.IsSetRemoteTabletId() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_tablet_id", thrift.I64, 2)
- offset += bthrift.Binary.WriteI64(buf[offset:], *p.RemoteTabletId)
-
- offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
- }
- return offset
-}
-
-func (p *TIngestBinlogRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
- offset := 0
- if p.IsSetBinlogVersion() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "binlog_version", thrift.I64, 3)
- offset += bthrift.Binary.WriteI64(buf[offset:], *p.BinlogVersion)
-
- offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
- }
- return offset
-}
-
-func (p *TIngestBinlogRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
- offset := 0
- if p.IsSetRemoteHost() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_host", thrift.STRING, 4)
- offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.RemoteHost)
-
- offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
- }
- return offset
-}
-
-func (p *TIngestBinlogRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TWarmUpCacheAsyncRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
-    if p.IsSetRemotePort() {
-        offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_port", thrift.STRING, 5)
-        offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.RemotePort)
-        offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
-    }
+    offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "host", thrift.STRING, 1)
+    offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.Host)
+    offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *TIngestBinlogRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TWarmUpCacheAsyncRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
-    if p.IsSetPartitionId() {
-        offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_id", thrift.I64, 6)
-        offset += bthrift.Binary.WriteI64(buf[offset:], *p.PartitionId)
-        offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
-    }
+    offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "brpc_port", thrift.I32, 2)
+    offset += bthrift.Binary.WriteI32(buf[offset:], p.BrpcPort)
+    offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *TIngestBinlogRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *TWarmUpCacheAsyncRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- if p.IsSetLocalTabletId() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "local_tablet_id", thrift.I64, 7)
- offset += bthrift.Binary.WriteI64(buf[offset:], *p.LocalTabletId)
-
- offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
- }
- return offset
-}
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_ids", thrift.LIST, 3)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.I64, 0)
+ var length int
+ for _, v := range p.TabletIds {
+ length++
+ offset += bthrift.Binary.WriteI64(buf[offset:], v)
-func (p *TIngestBinlogRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int {
- offset := 0
- if p.IsSetLoadId() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_id", thrift.STRUCT, 8)
- offset += p.LoadId.FastWriteNocopy(buf[offset:], binaryWriter)
- offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
}
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *TIngestBinlogRequest) field1Length() int {
+func (p *TWarmUpCacheAsyncRequest) field1Length() int {
l := 0
-    if p.IsSetTxnId() {
-        l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 1)
-        l += bthrift.Binary.I64Length(*p.TxnId)
-        l += bthrift.Binary.FieldEndLength()
-    }
+    l += bthrift.Binary.FieldBeginLength("host", thrift.STRING, 1)
+    l += bthrift.Binary.StringLengthNocopy(p.Host)
+    l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *TIngestBinlogRequest) field2Length() int {
+func (p *TWarmUpCacheAsyncRequest) field2Length() int {
l := 0
-    if p.IsSetRemoteTabletId() {
-        l += bthrift.Binary.FieldBeginLength("remote_tablet_id", thrift.I64, 2)
-        l += bthrift.Binary.I64Length(*p.RemoteTabletId)
-        l += bthrift.Binary.FieldEndLength()
-    }
+    l += bthrift.Binary.FieldBeginLength("brpc_port", thrift.I32, 2)
+    l += bthrift.Binary.I32Length(p.BrpcPort)
+    l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *TIngestBinlogRequest) field3Length() int {
+func (p *TWarmUpCacheAsyncRequest) field3Length() int {
l := 0
- if p.IsSetBinlogVersion() {
- l += bthrift.Binary.FieldBeginLength("binlog_version", thrift.I64, 3)
- l += bthrift.Binary.I64Length(*p.BinlogVersion)
-
- l += bthrift.Binary.FieldEndLength()
- }
+ l += bthrift.Binary.FieldBeginLength("tablet_ids", thrift.LIST, 3)
+ l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TabletIds))
+ var tmpV int64
+ l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.TabletIds)
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *TIngestBinlogRequest) field4Length() int {
- l := 0
- if p.IsSetRemoteHost() {
- l += bthrift.Binary.FieldBeginLength("remote_host", thrift.STRING, 4)
- l += bthrift.Binary.StringLengthNocopy(*p.RemoteHost)
+func (p *TWarmUpCacheAsyncResponse) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetStatus bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetStatus = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetStatus {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpCacheAsyncResponse[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpCacheAsyncResponse[fieldId]))
+}
+
+func (p *TWarmUpCacheAsyncResponse) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := status.NewTStatus()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Status = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *TWarmUpCacheAsyncResponse) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TWarmUpCacheAsyncResponse) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWarmUpCacheAsyncResponse")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TWarmUpCacheAsyncResponse) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TWarmUpCacheAsyncResponse")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TWarmUpCacheAsyncResponse) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1)
+ offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TWarmUpCacheAsyncResponse) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1)
+ l += p.Status.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TCheckWarmUpCacheAsyncRequest) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckWarmUpCacheAsyncRequest[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TCheckWarmUpCacheAsyncRequest) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.Tablets = make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+ var _elem int64
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _elem = v
+
+ }
+
+ p.Tablets = append(p.Tablets, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TCheckWarmUpCacheAsyncRequest) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TCheckWarmUpCacheAsyncRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCheckWarmUpCacheAsyncRequest")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TCheckWarmUpCacheAsyncRequest) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TCheckWarmUpCacheAsyncRequest")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TCheckWarmUpCacheAsyncRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetTablets() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablets", thrift.LIST, 1)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.I64, 0)
+ var length int
+ for _, v := range p.Tablets {
+ length++
+ offset += bthrift.Binary.WriteI64(buf[offset:], v)
+
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TCheckWarmUpCacheAsyncRequest) field1Length() int {
+ l := 0
+ if p.IsSetTablets() {
+ l += bthrift.Binary.FieldBeginLength("tablets", thrift.LIST, 1)
+ l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.Tablets))
+ var tmpV int64
+ l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.Tablets)
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetStatus bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetStatus = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.MAP {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetStatus {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TCheckWarmUpCacheAsyncResponse[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TCheckWarmUpCacheAsyncResponse[fieldId]))
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := status.NewTStatus()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Status = tmp
+ return offset, nil
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.TaskDone = make(map[int64]bool, size)
+ for i := 0; i < size; i++ {
+ var _key int64
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _key = v
+
+ }
+
+ var _val bool
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _val = v
+
+ }
+
+ p.TaskDone[_key] = _val
+ }
+ if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TCheckWarmUpCacheAsyncResponse) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TCheckWarmUpCacheAsyncResponse")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TCheckWarmUpCacheAsyncResponse")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1)
+ offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetTaskDone() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_done", thrift.MAP, 2)
+ mapBeginOffset := offset
+ offset += bthrift.Binary.MapBeginLength(thrift.I64, thrift.BOOL, 0)
+ var length int
+ for k, v := range p.TaskDone {
+ length++
+
+ offset += bthrift.Binary.WriteI64(buf[offset:], k)
+
+ offset += bthrift.Binary.WriteBool(buf[offset:], v)
+
+ }
+ bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I64, thrift.BOOL, length)
+ offset += bthrift.Binary.WriteMapEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1)
+ l += p.Status.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TCheckWarmUpCacheAsyncResponse) field2Length() int {
+ l := 0
+ if p.IsSetTaskDone() {
+ l += bthrift.Binary.FieldBeginLength("task_done", thrift.MAP, 2)
+ l += bthrift.Binary.MapBeginLength(thrift.I64, thrift.BOOL, len(p.TaskDone))
+ var tmpK int64
+ var tmpV bool
+ l += (bthrift.Binary.I64Length(int64(tmpK)) + bthrift.Binary.BoolLength(bool(tmpV))) * len(p.TaskDone)
+ l += bthrift.Binary.MapEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TSyncLoadForTabletsRequest) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetTabletIds bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetTabletIds = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetTabletIds {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSyncLoadForTabletsRequest[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TSyncLoadForTabletsRequest[fieldId]))
+}
+
+func (p *TSyncLoadForTabletsRequest) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.TabletIds = make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+ var _elem int64
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _elem = v
+
+ }
+
+ p.TabletIds = append(p.TabletIds, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TSyncLoadForTabletsRequest) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TSyncLoadForTabletsRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSyncLoadForTabletsRequest")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TSyncLoadForTabletsRequest) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TSyncLoadForTabletsRequest")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TSyncLoadForTabletsRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_ids", thrift.LIST, 1)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.I64, 0)
+ var length int
+ for _, v := range p.TabletIds {
+ length++
+ offset += bthrift.Binary.WriteI64(buf[offset:], v)
+
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TSyncLoadForTabletsRequest) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("tablet_ids", thrift.LIST, 1)
+ l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TabletIds))
+ var tmpV int64
+ l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.TabletIds)
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TSyncLoadForTabletsResponse) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+// for compatibility
+func (p *TSyncLoadForTabletsResponse) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TSyncLoadForTabletsResponse) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TSyncLoadForTabletsResponse")
+ if p != nil {
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TSyncLoadForTabletsResponse) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TSyncLoadForTabletsResponse")
+ if p != nil {
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *THotPartition) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetPartitionId bool = false
+ var issetLastAccessTime bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetPartitionId = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetLastAccessTime = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField3(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField4(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetPartitionId {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetLastAccessTime {
+ fieldId = 2
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THotPartition[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_THotPartition[fieldId]))
+}
+
+func (p *THotPartition) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.PartitionId = v
+
+ }
+ return offset, nil
+}
+
+func (p *THotPartition) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.LastAccessTime = v
+
+ }
+ return offset, nil
+}
+
+func (p *THotPartition) FastReadField3(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.QueryPerDay = &v
+
+ }
+ return offset, nil
+}
+
+func (p *THotPartition) FastReadField4(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.QueryPerWeek = &v
+
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *THotPartition) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *THotPartition) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "THotPartition")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField3(buf[offset:], binaryWriter)
+ offset += p.fastWriteField4(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *THotPartition) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("THotPartition")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ l += p.field3Length()
+ l += p.field4Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *THotPartition) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_id", thrift.I64, 1)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.PartitionId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *THotPartition) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "last_access_time", thrift.I64, 2)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.LastAccessTime)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *THotPartition) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetQueryPerDay() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_per_day", thrift.I64, 3)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.QueryPerDay)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *THotPartition) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetQueryPerWeek() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_per_week", thrift.I64, 4)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.QueryPerWeek)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *THotPartition) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("partition_id", thrift.I64, 1)
+ l += bthrift.Binary.I64Length(p.PartitionId)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *THotPartition) field2Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("last_access_time", thrift.I64, 2)
+ l += bthrift.Binary.I64Length(p.LastAccessTime)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *THotPartition) field3Length() int {
+ l := 0
+ if p.IsSetQueryPerDay() {
+ l += bthrift.Binary.FieldBeginLength("query_per_day", thrift.I64, 3)
+ l += bthrift.Binary.I64Length(*p.QueryPerDay)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *THotPartition) field4Length() int {
+ l := 0
+ if p.IsSetQueryPerWeek() {
+ l += bthrift.Binary.FieldBeginLength("query_per_week", thrift.I64, 4)
+ l += bthrift.Binary.I64Length(*p.QueryPerWeek)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *THotTableMessage) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetTableId bool = false
+ var issetIndexId bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetTableId = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetIndexId = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField3(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetTableId {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetIndexId {
+ fieldId = 2
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THotTableMessage[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_THotTableMessage[fieldId]))
+}
+
+func (p *THotTableMessage) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.TableId = v
+
+ }
+ return offset, nil
+}
+
+func (p *THotTableMessage) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.IndexId = v
+
+ }
+ return offset, nil
+}
+
+func (p *THotTableMessage) FastReadField3(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.HotPartitions = make([]*THotPartition, 0, size)
+ for i := 0; i < size; i++ {
+ _elem := NewTHotPartition()
+ if l, err := _elem.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+
+ p.HotPartitions = append(p.HotPartitions, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *THotTableMessage) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *THotTableMessage) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "THotTableMessage")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField3(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *THotTableMessage) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("THotTableMessage")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ l += p.field3Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *THotTableMessage) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "table_id", thrift.I64, 1)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.TableId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *THotTableMessage) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "index_id", thrift.I64, 2)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.IndexId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *THotTableMessage) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetHotPartitions() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hot_partitions", thrift.LIST, 3)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
+ var length int
+ for _, v := range p.HotPartitions {
+ length++
+ offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *THotTableMessage) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("table_id", thrift.I64, 1)
+ l += bthrift.Binary.I64Length(p.TableId)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *THotTableMessage) field2Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("index_id", thrift.I64, 2)
+ l += bthrift.Binary.I64Length(p.IndexId)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *THotTableMessage) field3Length() int {
+ l := 0
+ if p.IsSetHotPartitions() {
+ l += bthrift.Binary.FieldBeginLength("hot_partitions", thrift.LIST, 3)
+ l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.HotPartitions))
+ for _, v := range p.HotPartitions {
+ l += v.BLength()
+ }
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TGetTopNHotPartitionsRequest) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+// for compatibility
+func (p *TGetTopNHotPartitionsRequest) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TGetTopNHotPartitionsRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetTopNHotPartitionsRequest")
+ if p != nil {
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TGetTopNHotPartitionsRequest) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TGetTopNHotPartitionsRequest")
+ if p != nil {
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TGetTopNHotPartitionsResponse) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetFileCacheSize bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetFileCacheSize = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetFileCacheSize {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetTopNHotPartitionsResponse[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TGetTopNHotPartitionsResponse[fieldId]))
+}
+
+func (p *TGetTopNHotPartitionsResponse) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.FileCacheSize = v
+
+ }
+ return offset, nil
+}
+
+func (p *TGetTopNHotPartitionsResponse) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.HotTables = make([]*THotTableMessage, 0, size)
+ for i := 0; i < size; i++ {
+ _elem := NewTHotTableMessage()
+ if l, err := _elem.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+
+ p.HotTables = append(p.HotTables, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TGetTopNHotPartitionsResponse) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TGetTopNHotPartitionsResponse) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetTopNHotPartitionsResponse")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TGetTopNHotPartitionsResponse) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TGetTopNHotPartitionsResponse")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TGetTopNHotPartitionsResponse) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "file_cache_size", thrift.I64, 1)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.FileCacheSize)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
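+// fastWriteField2 encodes the optional hot_tables list with a backfilled
+// header: a placeholder list header for size 0 is written first (the binary
+// list header has a fixed width), the elements are written while being
+// counted, and the real element count is then patched in at listBeginOffset.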
+func (p *TGetTopNHotPartitionsResponse) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetHotTables() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "hot_tables", thrift.LIST, 2)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
+ var length int
+ for _, v := range p.HotTables {
+ length++
+ offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TGetTopNHotPartitionsResponse) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("file_cache_size", thrift.I64, 1)
+ l += bthrift.Binary.I64Length(p.FileCacheSize)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TGetTopNHotPartitionsResponse) field2Length() int {
+ l := 0
+ if p.IsSetHotTables() {
+ l += bthrift.Binary.FieldBeginLength("hot_tables", thrift.LIST, 2)
+ l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.HotTables))
+ for _, v := range p.HotTables {
+ l += v.BLength()
+ }
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
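+// A minimal encoding sketch for the structs in this file. BLength returns
+// exactly the number of bytes that FastWriteNocopy will write, so a caller
+// can size the buffer up front. The names resp, bw, and send are illustrative
+// only, and the NewTGetTopNHotPartitionsResponse constructor is assumed to be
+// generated alongside these codecs rather than defined here:
+//
+//	resp := NewTGetTopNHotPartitionsResponse()
+//	buf := make([]byte, resp.BLength())
+//	n := resp.FastWriteNocopy(buf, bw) // bw: a bthrift.BinaryWriter supplied by the caller
+//	send(buf[:n])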
+func (p *TJobMeta) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetDownloadType bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetDownloadType = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField3(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField4(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetDownloadType {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TJobMeta[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TJobMeta[fieldId]))
+}
+
+func (p *TJobMeta) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.DownloadType = TDownloadType(v)
+
+ }
+ return offset, nil
+}
+
+func (p *TJobMeta) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.BeIp = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TJobMeta) FastReadField3(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.BrpcPort = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TJobMeta) FastReadField4(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.TabletIds = make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+ var _elem int64
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _elem = v
+
+ }
+
+ p.TabletIds = append(p.TabletIds, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TJobMeta) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TJobMeta) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TJobMeta")
+ if p != nil {
+ offset += p.fastWriteField3(buf[offset:], binaryWriter)
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField4(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TJobMeta) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TJobMeta")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ l += p.field3Length()
+ l += p.field4Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TJobMeta) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "download_type", thrift.I32, 1)
+ offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.DownloadType))
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TJobMeta) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetBeIp() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "be_ip", thrift.STRING, 2)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.BeIp)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TJobMeta) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetBrpcPort() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "brpc_port", thrift.I32, 3)
+ offset += bthrift.Binary.WriteI32(buf[offset:], *p.BrpcPort)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TJobMeta) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetTabletIds() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_ids", thrift.LIST, 4)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.I64, 0)
+ var length int
+ for _, v := range p.TabletIds {
+ length++
+ offset += bthrift.Binary.WriteI64(buf[offset:], v)
+
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TJobMeta) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("download_type", thrift.I32, 1)
+ l += bthrift.Binary.I32Length(int32(p.DownloadType))
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TJobMeta) field2Length() int {
+ l := 0
+ if p.IsSetBeIp() {
+ l += bthrift.Binary.FieldBeginLength("be_ip", thrift.STRING, 2)
+ l += bthrift.Binary.StringLengthNocopy(*p.BeIp)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TJobMeta) field3Length() int {
+ l := 0
+ if p.IsSetBrpcPort() {
+ l += bthrift.Binary.FieldBeginLength("brpc_port", thrift.I32, 3)
+ l += bthrift.Binary.I32Length(*p.BrpcPort)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
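+// field4Length sizes the optional tablet_ids list without visiting each
+// value: an i64 has a fixed encoded width, so the length of one zero element
+// is multiplied by len(p.TabletIds).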
+func (p *TJobMeta) field4Length() int {
+ l := 0
+ if p.IsSetTabletIds() {
+ l += bthrift.Binary.FieldBeginLength("tablet_ids", thrift.LIST, 4)
+ l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.TabletIds))
+ var tmpV int64
+ l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.TabletIds)
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
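+// FastRead for TWarmUpTabletsRequest tracks its three required fields
+// (job_id, batch_id and type) with isset flags and returns an INVALID_DATA
+// protocol exception if any of them is still unset once the field loop ends.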
+func (p *TWarmUpTabletsRequest) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetJobId bool = false
+ var issetBatchId bool = false
+ var issetType bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetJobId = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetBatchId = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField3(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField4(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetType = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetJobId {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetBatchId {
+ fieldId = 2
+ goto RequiredFieldNotSetError
+ }
+
+ if !issetType {
+ fieldId = 4
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpTabletsRequest[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpTabletsRequest[fieldId]))
+}
+
+func (p *TWarmUpTabletsRequest) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.JobId = v
+
+ }
+ return offset, nil
+}
+
+func (p *TWarmUpTabletsRequest) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.BatchId = v
+
+ }
+ return offset, nil
+}
+
+func (p *TWarmUpTabletsRequest) FastReadField3(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.JobMetas = make([]*TJobMeta, 0, size)
+ for i := 0; i < size; i++ {
+ _elem := NewTJobMeta()
+ if l, err := _elem.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+
+ p.JobMetas = append(p.JobMetas, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+func (p *TWarmUpTabletsRequest) FastReadField4(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.Type = TWarmUpTabletsRequestType(v)
+
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TWarmUpTabletsRequest) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TWarmUpTabletsRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWarmUpTabletsRequest")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField3(buf[offset:], binaryWriter)
+ offset += p.fastWriteField4(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TWarmUpTabletsRequest) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TWarmUpTabletsRequest")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ l += p.field3Length()
+ l += p.field4Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TWarmUpTabletsRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_id", thrift.I64, 1)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.JobId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TWarmUpTabletsRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "batch_id", thrift.I64, 2)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.BatchId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TWarmUpTabletsRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetJobMetas() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_metas", thrift.LIST, 3)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
+ var length int
+ for _, v := range p.JobMetas {
+ length++
+ offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWarmUpTabletsRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "type", thrift.I32, 4)
+ offset += bthrift.Binary.WriteI32(buf[offset:], int32(p.Type))
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TWarmUpTabletsRequest) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("job_id", thrift.I64, 1)
+ l += bthrift.Binary.I64Length(p.JobId)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TWarmUpTabletsRequest) field2Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("batch_id", thrift.I64, 2)
+ l += bthrift.Binary.I64Length(p.BatchId)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TWarmUpTabletsRequest) field3Length() int {
+ l := 0
+ if p.IsSetJobMetas() {
+ l += bthrift.Binary.FieldBeginLength("job_metas", thrift.LIST, 3)
+ l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.JobMetas))
+ for _, v := range p.JobMetas {
+ l += v.BLength()
+ }
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWarmUpTabletsRequest) field4Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("type", thrift.I32, 4)
+ l += bthrift.Binary.I32Length(int32(p.Type))
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TWarmUpTabletsResponse) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetStatus bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetStatus = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField3(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField4(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField5(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetStatus {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWarmUpTabletsResponse[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TWarmUpTabletsResponse[fieldId]))
+}
+
+func (p *TWarmUpTabletsResponse) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := status.NewTStatus()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Status = tmp
+ return offset, nil
+}
+
+func (p *TWarmUpTabletsResponse) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.JobId = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWarmUpTabletsResponse) FastReadField3(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.BatchId = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWarmUpTabletsResponse) FastReadField4(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.PendingJobSize = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWarmUpTabletsResponse) FastReadField5(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.FinishJobSize = &v
+
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TWarmUpTabletsResponse) FastWrite(buf []byte) int {
+ return 0
+}
+
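+// FastWriteNocopy emits the optional fields 2-5 before the required status
+// field 1. The wire order is free to differ from the field-id order because
+// every Thrift binary field carries its own id, so decoders dispatch by id
+// rather than by position.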
+func (p *TWarmUpTabletsResponse) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWarmUpTabletsResponse")
+ if p != nil {
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField3(buf[offset:], binaryWriter)
+ offset += p.fastWriteField4(buf[offset:], binaryWriter)
+ offset += p.fastWriteField5(buf[offset:], binaryWriter)
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TWarmUpTabletsResponse) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TWarmUpTabletsResponse")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ l += p.field3Length()
+ l += p.field4Length()
+ l += p.field5Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TWarmUpTabletsResponse) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1)
+ offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TWarmUpTabletsResponse) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetJobId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "job_id", thrift.I64, 2)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.JobId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWarmUpTabletsResponse) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetBatchId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "batch_id", thrift.I64, 3)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.BatchId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWarmUpTabletsResponse) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetPendingJobSize() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "pending_job_size", thrift.I64, 4)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.PendingJobSize)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWarmUpTabletsResponse) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetFinishJobSize() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "finish_job_size", thrift.I64, 5)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.FinishJobSize)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWarmUpTabletsResponse) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1)
+ l += p.Status.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TWarmUpTabletsResponse) field2Length() int {
+ l := 0
+ if p.IsSetJobId() {
+ l += bthrift.Binary.FieldBeginLength("job_id", thrift.I64, 2)
+ l += bthrift.Binary.I64Length(*p.JobId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWarmUpTabletsResponse) field3Length() int {
+ l := 0
+ if p.IsSetBatchId() {
+ l += bthrift.Binary.FieldBeginLength("batch_id", thrift.I64, 3)
+ l += bthrift.Binary.I64Length(*p.BatchId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWarmUpTabletsResponse) field4Length() int {
+ l := 0
+ if p.IsSetPendingJobSize() {
+ l += bthrift.Binary.FieldBeginLength("pending_job_size", thrift.I64, 4)
+ l += bthrift.Binary.I64Length(*p.PendingJobSize)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWarmUpTabletsResponse) field5Length() int {
+ l := 0
+ if p.IsSetFinishJobSize() {
+ l += bthrift.Binary.FieldBeginLength("finish_job_size", thrift.I64, 5)
+ l += bthrift.Binary.I64Length(*p.FinishJobSize)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
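+// Every field of TIngestBinlogRequest is optional, so this FastRead keeps no
+// isset flags and has no RequiredFieldNotSetError path; fields with an
+// unexpected type, like unknown field ids, are simply skipped.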
+func (p *TIngestBinlogRequest) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField3(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField4(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField5(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 6:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField6(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 7:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField7(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 8:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField8(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogRequest[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TIngestBinlogRequest) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.TxnId = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TIngestBinlogRequest) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.RemoteTabletId = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TIngestBinlogRequest) FastReadField3(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.BinlogVersion = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TIngestBinlogRequest) FastReadField4(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.RemoteHost = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TIngestBinlogRequest) FastReadField5(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.RemotePort = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TIngestBinlogRequest) FastReadField6(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.PartitionId = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TIngestBinlogRequest) FastReadField7(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.LocalTabletId = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TIngestBinlogRequest) FastReadField8(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := types.NewTUniqueId()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.LoadId = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *TIngestBinlogRequest) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TIngestBinlogRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIngestBinlogRequest")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField3(buf[offset:], binaryWriter)
+ offset += p.fastWriteField6(buf[offset:], binaryWriter)
+ offset += p.fastWriteField7(buf[offset:], binaryWriter)
+ offset += p.fastWriteField4(buf[offset:], binaryWriter)
+ offset += p.fastWriteField5(buf[offset:], binaryWriter)
+ offset += p.fastWriteField8(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TIngestBinlogRequest) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TIngestBinlogRequest")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ l += p.field3Length()
+ l += p.field4Length()
+ l += p.field5Length()
+ l += p.field6Length()
+ l += p.field7Length()
+ l += p.field8Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TIngestBinlogRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetTxnId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 1)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TIngestBinlogRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetRemoteTabletId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_tablet_id", thrift.I64, 2)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.RemoteTabletId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TIngestBinlogRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetBinlogVersion() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "binlog_version", thrift.I64, 3)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.BinlogVersion)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TIngestBinlogRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetRemoteHost() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_host", thrift.STRING, 4)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.RemoteHost)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TIngestBinlogRequest) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetRemotePort() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_port", thrift.STRING, 5)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.RemotePort)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TIngestBinlogRequest) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetPartitionId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_id", thrift.I64, 6)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.PartitionId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TIngestBinlogRequest) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetLocalTabletId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "local_tablet_id", thrift.I64, 7)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.LocalTabletId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TIngestBinlogRequest) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetLoadId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_id", thrift.STRUCT, 8)
+ offset += p.LoadId.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TIngestBinlogRequest) field1Length() int {
+ l := 0
+ if p.IsSetTxnId() {
+ l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 1)
+ l += bthrift.Binary.I64Length(*p.TxnId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TIngestBinlogRequest) field2Length() int {
+ l := 0
+ if p.IsSetRemoteTabletId() {
+ l += bthrift.Binary.FieldBeginLength("remote_tablet_id", thrift.I64, 2)
+ l += bthrift.Binary.I64Length(*p.RemoteTabletId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TIngestBinlogRequest) field3Length() int {
+ l := 0
+ if p.IsSetBinlogVersion() {
+ l += bthrift.Binary.FieldBeginLength("binlog_version", thrift.I64, 3)
+ l += bthrift.Binary.I64Length(*p.BinlogVersion)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TIngestBinlogRequest) field4Length() int {
+ l := 0
+ if p.IsSetRemoteHost() {
+ l += bthrift.Binary.FieldBeginLength("remote_host", thrift.STRING, 4)
+ l += bthrift.Binary.StringLengthNocopy(*p.RemoteHost)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TIngestBinlogRequest) field5Length() int {
+ l := 0
+ if p.IsSetRemotePort() {
+ l += bthrift.Binary.FieldBeginLength("remote_port", thrift.STRING, 5)
+ l += bthrift.Binary.StringLengthNocopy(*p.RemotePort)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TIngestBinlogRequest) field6Length() int {
+ l := 0
+ if p.IsSetPartitionId() {
+ l += bthrift.Binary.FieldBeginLength("partition_id", thrift.I64, 6)
+ l += bthrift.Binary.I64Length(*p.PartitionId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TIngestBinlogRequest) field7Length() int {
+ l := 0
+ if p.IsSetLocalTabletId() {
+ l += bthrift.Binary.FieldBeginLength("local_tablet_id", thrift.I64, 7)
+ l += bthrift.Binary.I64Length(*p.LocalTabletId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TIngestBinlogRequest) field8Length() int {
+ l := 0
+ if p.IsSetLoadId() {
+ l += bthrift.Binary.FieldBeginLength("load_id", thrift.STRUCT, 8)
+ l += p.LoadId.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
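+// The trailing underscore in TIngestBinlogResult_ is a Go-side rename,
+// presumably added by the generator to avoid clashing with generated service
+// Args/Result wrapper types; on the wire the struct keeps its Thrift name
+// "TIngestBinlogResult", as the WriteStructBegin call below shows.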
+func (p *TIngestBinlogResult_) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogResult_[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TIngestBinlogResult_) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := status.NewTStatus()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Status = tmp
+ return offset, nil
+}
+
+func (p *TIngestBinlogResult_) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.IsAsync = &v
+
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TIngestBinlogResult_) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TIngestBinlogResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIngestBinlogResult")
+ if p != nil {
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TIngestBinlogResult_) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TIngestBinlogResult")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TIngestBinlogResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetStatus() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1)
+ offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TIngestBinlogResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetIsAsync() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "is_async", thrift.BOOL, 2)
+ offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsAsync)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TIngestBinlogResult_) field1Length() int {
+ l := 0
+ if p.IsSetStatus() {
+ l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1)
+ l += p.Status.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TIngestBinlogResult_) field2Length() int {
+ l := 0
+ if p.IsSetIsAsync() {
+ l += bthrift.Binary.FieldBeginLength("is_async", thrift.BOOL, 2)
+ l += bthrift.Binary.BoolLength(*p.IsAsync)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TQueryIngestBinlogRequest) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField3(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField4(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryIngestBinlogRequest[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TQueryIngestBinlogRequest) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.TxnId = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TQueryIngestBinlogRequest) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.PartitionId = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TQueryIngestBinlogRequest) FastReadField3(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.TabletId = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TQueryIngestBinlogRequest) FastReadField4(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := types.NewTUniqueId()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.LoadId = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *TQueryIngestBinlogRequest) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TQueryIngestBinlogRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TQueryIngestBinlogRequest")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField3(buf[offset:], binaryWriter)
+ offset += p.fastWriteField4(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TQueryIngestBinlogRequest) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TQueryIngestBinlogRequest")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ l += p.field3Length()
+ l += p.field4Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TQueryIngestBinlogRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetTxnId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "txn_id", thrift.I64, 1)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.TxnId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TQueryIngestBinlogRequest) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetPartitionId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "partition_id", thrift.I64, 2)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.PartitionId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TQueryIngestBinlogRequest) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetTabletId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tablet_id", thrift.I64, 3)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.TabletId)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TQueryIngestBinlogRequest) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetLoadId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "load_id", thrift.STRUCT, 4)
+ offset += p.LoadId.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TQueryIngestBinlogRequest) field1Length() int {
+ l := 0
+ if p.IsSetTxnId() {
+ l += bthrift.Binary.FieldBeginLength("txn_id", thrift.I64, 1)
+ l += bthrift.Binary.I64Length(*p.TxnId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TQueryIngestBinlogRequest) field2Length() int {
+ l := 0
+ if p.IsSetPartitionId() {
+ l += bthrift.Binary.FieldBeginLength("partition_id", thrift.I64, 2)
+ l += bthrift.Binary.I64Length(*p.PartitionId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TQueryIngestBinlogRequest) field3Length() int {
+ l := 0
+ if p.IsSetTabletId() {
+ l += bthrift.Binary.FieldBeginLength("tablet_id", thrift.I64, 3)
+ l += bthrift.Binary.I64Length(*p.TabletId)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TQueryIngestBinlogRequest) field4Length() int {
+ l := 0
+ if p.IsSetLoadId() {
+ l += bthrift.Binary.FieldBeginLength("load_id", thrift.STRUCT, 4)
+ l += p.LoadId.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TQueryIngestBinlogResult_) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TQueryIngestBinlogResult_[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
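+// FastReadField1 decodes the optional status enum: the wire value arrives as
+// an i32, is converted to TIngestBinlogStatus, and is stored through a
+// pointer so an unset field stays distinguishable from the zero value.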
+func (p *TQueryIngestBinlogResult_) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ tmp := TIngestBinlogStatus(v)
+ p.Status = &tmp
+
+ }
+ return offset, nil
+}
+
+func (p *TQueryIngestBinlogResult_) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.ErrMsg = &v
+
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TQueryIngestBinlogResult_) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TQueryIngestBinlogResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TQueryIngestBinlogResult")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TQueryIngestBinlogResult_) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TQueryIngestBinlogResult")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TQueryIngestBinlogResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetStatus() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.I32, 1)
+ offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Status))
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TQueryIngestBinlogResult_) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetErrMsg() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "err_msg", thrift.STRING, 2)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ErrMsg)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TQueryIngestBinlogResult_) field1Length() int {
+ l := 0
+ if p.IsSetStatus() {
+ l += bthrift.Binary.FieldBeginLength("status", thrift.I32, 1)
+ l += bthrift.Binary.I32Length(int32(*p.Status))
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TQueryIngestBinlogResult_) field2Length() int {
+ l := 0
+ if p.IsSetErrMsg() {
+ l += bthrift.Binary.FieldBeginLength("err_msg", thrift.STRING, 2)
+ l += bthrift.Binary.StringLengthNocopy(*p.ErrMsg)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField3(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField4(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField5(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 6:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField6(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 7:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField7(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 8:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField8(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 9:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField9(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 10:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField10(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 11:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField11(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 12:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField12(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 13:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField13(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 14:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField14(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 15:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField15(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 16:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField16(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadGroupInfo[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TWorkloadGroupInfo) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.Id = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.Name = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField3(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.Version = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField4(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.CpuShare = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField5(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.CpuHardLimit = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField6(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.MemLimit = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField7(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.EnableMemoryOvercommit = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField8(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.EnableCpuHardLimit = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField9(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.ScanThreadNum = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField10(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.MaxRemoteScanThreadNum = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField11(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.MinRemoteScanThreadNum = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField12(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.MemoryLowWatermark = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField13(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.MemoryHighWatermark = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField14(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.ReadBytesPerSecond = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField15(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.RemoteReadBytesPerSecond = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadGroupInfo) FastReadField16(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.Tag = &v
+
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TWorkloadGroupInfo) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TWorkloadGroupInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWorkloadGroupInfo")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField3(buf[offset:], binaryWriter)
+ offset += p.fastWriteField4(buf[offset:], binaryWriter)
+ offset += p.fastWriteField5(buf[offset:], binaryWriter)
+ offset += p.fastWriteField7(buf[offset:], binaryWriter)
+ offset += p.fastWriteField8(buf[offset:], binaryWriter)
+ offset += p.fastWriteField9(buf[offset:], binaryWriter)
+ offset += p.fastWriteField10(buf[offset:], binaryWriter)
+ offset += p.fastWriteField11(buf[offset:], binaryWriter)
+ offset += p.fastWriteField12(buf[offset:], binaryWriter)
+ offset += p.fastWriteField13(buf[offset:], binaryWriter)
+ offset += p.fastWriteField14(buf[offset:], binaryWriter)
+ offset += p.fastWriteField15(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField6(buf[offset:], binaryWriter)
+ offset += p.fastWriteField16(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TWorkloadGroupInfo")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ l += p.field3Length()
+ l += p.field4Length()
+ l += p.field5Length()
+ l += p.field6Length()
+ l += p.field7Length()
+ l += p.field8Length()
+ l += p.field9Length()
+ l += p.field10Length()
+ l += p.field11Length()
+ l += p.field12Length()
+ l += p.field13Length()
+ l += p.field14Length()
+ l += p.field15Length()
+ l += p.field16Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetName() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetVersion() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.I64, 3)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.Version)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetCpuShare() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cpu_share", thrift.I64, 4)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.CpuShare)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetCpuHardLimit() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "cpu_hard_limit", thrift.I32, 5)
+ offset += bthrift.Binary.WriteI32(buf[offset:], *p.CpuHardLimit)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetMemLimit() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "mem_limit", thrift.STRING, 6)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.MemLimit)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetEnableMemoryOvercommit() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_memory_overcommit", thrift.BOOL, 7)
+ offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableMemoryOvercommit)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetEnableCpuHardLimit() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enable_cpu_hard_limit", thrift.BOOL, 8)
+ offset += bthrift.Binary.WriteBool(buf[offset:], *p.EnableCpuHardLimit)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField9(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetScanThreadNum() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "scan_thread_num", thrift.I32, 9)
+ offset += bthrift.Binary.WriteI32(buf[offset:], *p.ScanThreadNum)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField10(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetMaxRemoteScanThreadNum() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "max_remote_scan_thread_num", thrift.I32, 10)
+ offset += bthrift.Binary.WriteI32(buf[offset:], *p.MaxRemoteScanThreadNum)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField11(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetMinRemoteScanThreadNum() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "min_remote_scan_thread_num", thrift.I32, 11)
+ offset += bthrift.Binary.WriteI32(buf[offset:], *p.MinRemoteScanThreadNum)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField12(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetMemoryLowWatermark() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "memory_low_watermark", thrift.I32, 12)
+ offset += bthrift.Binary.WriteI32(buf[offset:], *p.MemoryLowWatermark)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField13(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetMemoryHighWatermark() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "memory_high_watermark", thrift.I32, 13)
+ offset += bthrift.Binary.WriteI32(buf[offset:], *p.MemoryHighWatermark)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField14(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetReadBytesPerSecond() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "read_bytes_per_second", thrift.I64, 14)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.ReadBytesPerSecond)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField15(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetRemoteReadBytesPerSecond() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "remote_read_bytes_per_second", thrift.I64, 15)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.RemoteReadBytesPerSecond)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) fastWriteField16(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetTag() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tag", thrift.STRING, 16)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Tag)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadGroupInfo) field1Length() int {
+ l := 0
+ if p.IsSetId() {
+ l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1)
+ l += bthrift.Binary.I64Length(*p.Id)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field2Length() int {
+ l := 0
+ if p.IsSetName() {
+ l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2)
+ l += bthrift.Binary.StringLengthNocopy(*p.Name)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field3Length() int {
+ l := 0
+ if p.IsSetVersion() {
+ l += bthrift.Binary.FieldBeginLength("version", thrift.I64, 3)
+ l += bthrift.Binary.I64Length(*p.Version)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field4Length() int {
+ l := 0
+ if p.IsSetCpuShare() {
+ l += bthrift.Binary.FieldBeginLength("cpu_share", thrift.I64, 4)
+ l += bthrift.Binary.I64Length(*p.CpuShare)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field5Length() int {
+ l := 0
+ if p.IsSetCpuHardLimit() {
+ l += bthrift.Binary.FieldBeginLength("cpu_hard_limit", thrift.I32, 5)
+ l += bthrift.Binary.I32Length(*p.CpuHardLimit)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field6Length() int {
+ l := 0
+ if p.IsSetMemLimit() {
+ l += bthrift.Binary.FieldBeginLength("mem_limit", thrift.STRING, 6)
+ l += bthrift.Binary.StringLengthNocopy(*p.MemLimit)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field7Length() int {
+ l := 0
+ if p.IsSetEnableMemoryOvercommit() {
+ l += bthrift.Binary.FieldBeginLength("enable_memory_overcommit", thrift.BOOL, 7)
+ l += bthrift.Binary.BoolLength(*p.EnableMemoryOvercommit)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field8Length() int {
+ l := 0
+ if p.IsSetEnableCpuHardLimit() {
+ l += bthrift.Binary.FieldBeginLength("enable_cpu_hard_limit", thrift.BOOL, 8)
+ l += bthrift.Binary.BoolLength(*p.EnableCpuHardLimit)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field9Length() int {
+ l := 0
+ if p.IsSetScanThreadNum() {
+ l += bthrift.Binary.FieldBeginLength("scan_thread_num", thrift.I32, 9)
+ l += bthrift.Binary.I32Length(*p.ScanThreadNum)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field10Length() int {
+ l := 0
+ if p.IsSetMaxRemoteScanThreadNum() {
+ l += bthrift.Binary.FieldBeginLength("max_remote_scan_thread_num", thrift.I32, 10)
+ l += bthrift.Binary.I32Length(*p.MaxRemoteScanThreadNum)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field11Length() int {
+ l := 0
+ if p.IsSetMinRemoteScanThreadNum() {
+ l += bthrift.Binary.FieldBeginLength("min_remote_scan_thread_num", thrift.I32, 11)
+ l += bthrift.Binary.I32Length(*p.MinRemoteScanThreadNum)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field12Length() int {
+ l := 0
+ if p.IsSetMemoryLowWatermark() {
+ l += bthrift.Binary.FieldBeginLength("memory_low_watermark", thrift.I32, 12)
+ l += bthrift.Binary.I32Length(*p.MemoryLowWatermark)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field13Length() int {
+ l := 0
+ if p.IsSetMemoryHighWatermark() {
+ l += bthrift.Binary.FieldBeginLength("memory_high_watermark", thrift.I32, 13)
+ l += bthrift.Binary.I32Length(*p.MemoryHighWatermark)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field14Length() int {
+ l := 0
+ if p.IsSetReadBytesPerSecond() {
+ l += bthrift.Binary.FieldBeginLength("read_bytes_per_second", thrift.I64, 14)
+ l += bthrift.Binary.I64Length(*p.ReadBytesPerSecond)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field15Length() int {
+ l := 0
+ if p.IsSetRemoteReadBytesPerSecond() {
+ l += bthrift.Binary.FieldBeginLength("remote_read_bytes_per_second", thrift.I64, 15)
+ l += bthrift.Binary.I64Length(*p.RemoteReadBytesPerSecond)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadGroupInfo) field16Length() int {
+ l := 0
+ if p.IsSetTag() {
+ l += bthrift.Binary.FieldBeginLength("tag", thrift.STRING, 16)
+ l += bthrift.Binary.StringLengthNocopy(*p.Tag)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadCondition) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField3(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadCondition[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TWorkloadCondition) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ tmp := TWorkloadMetricType(v)
+ p.MetricName = &tmp
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadCondition) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ tmp := TCompareOperator(v)
+ p.Op = &tmp
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadCondition) FastReadField3(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.Value = &v
+
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TWorkloadCondition) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TWorkloadCondition) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWorkloadCondition")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField3(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TWorkloadCondition) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TWorkloadCondition")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ l += p.field3Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TWorkloadCondition) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetMetricName() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "metric_name", thrift.I32, 1)
+ offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.MetricName))
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadCondition) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetOp() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "op", thrift.I32, 2)
+ offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Op))
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadCondition) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetValue() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "value", thrift.STRING, 3)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Value)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadCondition) field1Length() int {
+ l := 0
+ if p.IsSetMetricName() {
+ l += bthrift.Binary.FieldBeginLength("metric_name", thrift.I32, 1)
+ l += bthrift.Binary.I32Length(int32(*p.MetricName))
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadCondition) field2Length() int {
+ l := 0
+ if p.IsSetOp() {
+ l += bthrift.Binary.FieldBeginLength("op", thrift.I32, 2)
+ l += bthrift.Binary.I32Length(int32(*p.Op))
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadCondition) field3Length() int {
+ l := 0
+ if p.IsSetValue() {
+ l += bthrift.Binary.FieldBeginLength("value", thrift.STRING, 3)
+ l += bthrift.Binary.StringLengthNocopy(*p.Value)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadAction) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadAction[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TWorkloadAction) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ tmp := TWorkloadActionType(v)
+ p.Action = &tmp
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadAction) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.ActionArgs_ = &v
+
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TWorkloadAction) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TWorkloadAction) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWorkloadAction")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TWorkloadAction) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TWorkloadAction")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TWorkloadAction) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetAction() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "action", thrift.I32, 1)
+ offset += bthrift.Binary.WriteI32(buf[offset:], int32(*p.Action))
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadAction) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetActionArgs_() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "action_args", thrift.STRING, 2)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.ActionArgs_)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadAction) field1Length() int {
+ l := 0
+ if p.IsSetAction() {
+ l += bthrift.Binary.FieldBeginLength("action", thrift.I32, 1)
+ l += bthrift.Binary.I32Length(int32(*p.Action))
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadAction) field2Length() int {
+ l := 0
+ if p.IsSetActionArgs_() {
+ l += bthrift.Binary.FieldBeginLength("action_args", thrift.STRING, 2)
+ l += bthrift.Binary.StringLengthNocopy(*p.ActionArgs_)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadSchedPolicy) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField3(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I32 {
+ l, err = p.FastReadField4(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField5(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 6:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField6(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 7:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField7(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 8:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField8(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TWorkloadSchedPolicy[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TWorkloadSchedPolicy) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.Id = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadSchedPolicy) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.Name = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadSchedPolicy) FastReadField3(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.Version = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadSchedPolicy) FastReadField4(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.Priority = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadSchedPolicy) FastReadField5(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.Enabled = &v
+
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadSchedPolicy) FastReadField6(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.ConditionList = make([]*TWorkloadCondition, 0, size)
+ for i := 0; i < size; i++ {
+ _elem := NewTWorkloadCondition()
+ if l, err := _elem.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+
+ p.ConditionList = append(p.ConditionList, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadSchedPolicy) FastReadField7(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.ActionList = make([]*TWorkloadAction, 0, size)
+ for i := 0; i < size; i++ {
+ _elem := NewTWorkloadAction()
+ if l, err := _elem.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+
+ p.ActionList = append(p.ActionList, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+func (p *TWorkloadSchedPolicy) FastReadField8(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.WgIdList = make([]int64, 0, size)
+ for i := 0; i < size; i++ {
+ var _elem int64
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _elem = v
+
+ }
+
+ p.WgIdList = append(p.WgIdList, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *TWorkloadSchedPolicy) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TWorkloadSchedPolicy) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TWorkloadSchedPolicy")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField3(buf[offset:], binaryWriter)
+ offset += p.fastWriteField4(buf[offset:], binaryWriter)
+ offset += p.fastWriteField5(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ offset += p.fastWriteField6(buf[offset:], binaryWriter)
+ offset += p.fastWriteField7(buf[offset:], binaryWriter)
+ offset += p.fastWriteField8(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TWorkloadSchedPolicy) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TWorkloadSchedPolicy")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ l += p.field3Length()
+ l += p.field4Length()
+ l += p.field5Length()
+ l += p.field6Length()
+ l += p.field7Length()
+ l += p.field8Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TWorkloadSchedPolicy) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.I64, 1)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.Id)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadSchedPolicy) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetName() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "name", thrift.STRING, 2)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, *p.Name)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadSchedPolicy) fastWriteField3(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetVersion() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "version", thrift.I32, 3)
+ offset += bthrift.Binary.WriteI32(buf[offset:], *p.Version)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadSchedPolicy) fastWriteField4(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetPriority() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "priority", thrift.I32, 4)
+ offset += bthrift.Binary.WriteI32(buf[offset:], *p.Priority)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadSchedPolicy) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetEnabled() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "enabled", thrift.BOOL, 5)
+ offset += bthrift.Binary.WriteBool(buf[offset:], *p.Enabled)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadSchedPolicy) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetConditionList() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "condition_list", thrift.LIST, 6)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
+ var length int
+ for _, v := range p.ConditionList {
+ length++
+ offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadSchedPolicy) fastWriteField7(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetActionList() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "action_list", thrift.LIST, 7)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
+ var length int
+ for _, v := range p.ActionList {
+ length++
+ offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadSchedPolicy) fastWriteField8(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetWgIdList() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "wg_id_list", thrift.LIST, 8)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.I64, 0)
+ var length int
+ for _, v := range p.WgIdList {
+ length++
+ offset += bthrift.Binary.WriteI64(buf[offset:], v)
+
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.I64, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TWorkloadSchedPolicy) field1Length() int {
+ l := 0
+ if p.IsSetId() {
+ l += bthrift.Binary.FieldBeginLength("id", thrift.I64, 1)
+ l += bthrift.Binary.I64Length(*p.Id)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadSchedPolicy) field2Length() int {
+ l := 0
+ if p.IsSetName() {
+ l += bthrift.Binary.FieldBeginLength("name", thrift.STRING, 2)
+ l += bthrift.Binary.StringLengthNocopy(*p.Name)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadSchedPolicy) field3Length() int {
+ l := 0
+ if p.IsSetVersion() {
+ l += bthrift.Binary.FieldBeginLength("version", thrift.I32, 3)
+ l += bthrift.Binary.I32Length(*p.Version)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadSchedPolicy) field4Length() int {
+ l := 0
+ if p.IsSetPriority() {
+ l += bthrift.Binary.FieldBeginLength("priority", thrift.I32, 4)
+ l += bthrift.Binary.I32Length(*p.Priority)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadSchedPolicy) field5Length() int {
+ l := 0
+ if p.IsSetEnabled() {
+ l += bthrift.Binary.FieldBeginLength("enabled", thrift.BOOL, 5)
+ l += bthrift.Binary.BoolLength(*p.Enabled)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadSchedPolicy) field6Length() int {
+ l := 0
+ if p.IsSetConditionList() {
+ l += bthrift.Binary.FieldBeginLength("condition_list", thrift.LIST, 6)
+ l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.ConditionList))
+ for _, v := range p.ConditionList {
+ l += v.BLength()
+ }
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadSchedPolicy) field7Length() int {
+ l := 0
+ if p.IsSetActionList() {
+ l += bthrift.Binary.FieldBeginLength("action_list", thrift.LIST, 7)
+ l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.ActionList))
+ for _, v := range p.ActionList {
+ l += v.BLength()
+ }
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TWorkloadSchedPolicy) field8Length() int {
+ l := 0
+ if p.IsSetWgIdList() {
+ l += bthrift.Binary.FieldBeginLength("wg_id_list", thrift.LIST, 8)
+ l += bthrift.Binary.ListBeginLength(thrift.I64, len(p.WgIdList))
+ var tmpV int64
+ l += bthrift.Binary.I64Length(int64(tmpV)) * len(p.WgIdList)
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TopicInfo) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TopicInfo[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TopicInfo) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := NewTWorkloadGroupInfo()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.WorkloadGroupInfo = tmp
+ return offset, nil
+}
+
+func (p *TopicInfo) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := NewTWorkloadSchedPolicy()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.WorkloadSchedPolicy = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *TopicInfo) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TopicInfo) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TopicInfo")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TopicInfo) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TopicInfo")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TopicInfo) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetWorkloadGroupInfo() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "workload_group_info", thrift.STRUCT, 1)
+ offset += p.WorkloadGroupInfo.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TopicInfo) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetWorkloadSchedPolicy() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "workload_sched_policy", thrift.STRUCT, 2)
+ offset += p.WorkloadSchedPolicy.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TopicInfo) field1Length() int {
+ l := 0
+ if p.IsSetWorkloadGroupInfo() {
+ l += bthrift.Binary.FieldBeginLength("workload_group_info", thrift.STRUCT, 1)
+ l += p.WorkloadGroupInfo.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TopicInfo) field2Length() int {
+ l := 0
+ if p.IsSetWorkloadSchedPolicy() {
+ l += bthrift.Binary.FieldBeginLength("workload_sched_policy", thrift.STRUCT, 2)
+ l += p.WorkloadSchedPolicy.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TPublishTopicRequest) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetTopicMap bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.MAP {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetTopicMap = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetTopicMap {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishTopicRequest[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishTopicRequest[fieldId]))
+}
+
+func (p *TPublishTopicRequest) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ _, _, size, l, err := bthrift.Binary.ReadMapBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.TopicMap = make(map[TTopicInfoType][]*TopicInfo, size)
+ for i := 0; i < size; i++ {
+ var _key TTopicInfoType
+ if v, l, err := bthrift.Binary.ReadI32(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ _key = TTopicInfoType(v)
+
+ }
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ _val := make([]*TopicInfo, 0, size)
+ for i := 0; i < size; i++ {
+ _elem := NewTopicInfo()
+ if l, err := _elem.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+
+ _val = append(_val, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+
+ p.TopicMap[_key] = _val
+ }
+ if l, err := bthrift.Binary.ReadMapEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+// FastWrite is kept only for interface compatibility; it is a no-op and FastWriteNocopy performs the actual encoding.
+func (p *TPublishTopicRequest) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TPublishTopicRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPublishTopicRequest")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TPublishTopicRequest) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TPublishTopicRequest")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TPublishTopicRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "topic_map", thrift.MAP, 1)
+ mapBeginOffset := offset
+ offset += bthrift.Binary.MapBeginLength(thrift.I32, thrift.LIST, 0)
+ var length int
+ for k, v := range p.TopicMap {
+ length++
+
+ offset += bthrift.Binary.WriteI32(buf[offset:], int32(k))
+
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
+ var length int
+ for _, v := range v {
+ length++
+ offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ }
+ bthrift.Binary.WriteMapBegin(buf[mapBeginOffset:], thrift.I32, thrift.LIST, length)
+ offset += bthrift.Binary.WriteMapEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TPublishTopicRequest) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("topic_map", thrift.MAP, 1)
+ l += bthrift.Binary.MapBeginLength(thrift.I32, thrift.LIST, len(p.TopicMap))
+ for k, v := range p.TopicMap {
+
+ l += bthrift.Binary.I32Length(int32(k))
+
+ l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(v))
+ for _, v := range v {
+ l += v.BLength()
+ }
+ l += bthrift.Binary.ListEndLength()
+ }
+ l += bthrift.Binary.MapEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TPublishTopicResult_) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ var issetStatus bool = false
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ issetStatus = true
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ if !issetStatus {
+ fieldId = 1
+ goto RequiredFieldNotSetError
+ }
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TPublishTopicResult_[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+RequiredFieldNotSetError:
+ return offset, thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("required field %s is not set", fieldIDToName_TPublishTopicResult_[fieldId]))
+}
+
+func (p *TPublishTopicResult_) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := status.NewTStatus()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Status = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *TPublishTopicResult_) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TPublishTopicResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TPublishTopicResult")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TPublishTopicResult_) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TPublishTopicResult")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TPublishTopicResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1)
+ offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *TPublishTopicResult_) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1)
+ l += p.Status.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *TGetRealtimeExecStatusRequest) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetRealtimeExecStatusRequest[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TGetRealtimeExecStatusRequest) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := types.NewTUniqueId()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Id = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *TGetRealtimeExecStatusRequest) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TGetRealtimeExecStatusRequest) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetRealtimeExecStatusRequest")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TGetRealtimeExecStatusRequest) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TGetRealtimeExecStatusRequest")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TGetRealtimeExecStatusRequest) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetId() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "id", thrift.STRUCT, 1)
+ offset += p.Id.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TGetRealtimeExecStatusRequest) field1Length() int {
+ l := 0
+ if p.IsSetId() {
+ l += bthrift.Binary.FieldBeginLength("id", thrift.STRUCT, 1)
+ l += p.Id.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TGetRealtimeExecStatusResponse) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField2(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TGetRealtimeExecStatusResponse[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TGetRealtimeExecStatusResponse) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := status.NewTStatus()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Status = tmp
+ return offset, nil
+}
+
+func (p *TGetRealtimeExecStatusResponse) FastReadField2(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := frontendservice.NewTReportExecStatusParams()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.ReportExecStatusParams = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *TGetRealtimeExecStatusResponse) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *TGetRealtimeExecStatusResponse) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TGetRealtimeExecStatusResponse")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField2(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *TGetRealtimeExecStatusResponse) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("TGetRealtimeExecStatusResponse")
+ if p != nil {
+ l += p.field1Length()
+ l += p.field2Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *TGetRealtimeExecStatusResponse) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetStatus() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1)
+ offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TGetRealtimeExecStatusResponse) fastWriteField2(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetReportExecStatusParams() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "report_exec_status_params", thrift.STRUCT, 2)
+ offset += p.ReportExecStatusParams.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *TGetRealtimeExecStatusResponse) field1Length() int {
+ l := 0
+ if p.IsSetStatus() {
+ l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1)
+ l += p.Status.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *TGetRealtimeExecStatusResponse) field2Length() int {
+ l := 0
+ if p.IsSetReportExecStatusParams() {
+ l += bthrift.Binary.FieldBeginLength("report_exec_status_params", thrift.STRUCT, 2)
+ l += p.ReportExecStatusParams.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentArgs[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := palointernalservice.NewTExecPlanFragmentParams()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Params = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServiceExecPlanFragmentArgs) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "exec_plan_fragment_args")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("exec_plan_fragment_args")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1)
+ offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceExecPlanFragmentArgs) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1)
+ l += p.Params.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *BackendServiceExecPlanFragmentResult) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField0(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentResult[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceExecPlanFragmentResult) FastReadField0(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := palointernalservice.NewTExecPlanFragmentResult_()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Success = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServiceExecPlanFragmentResult) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServiceExecPlanFragmentResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "exec_plan_fragment_result")
+ if p != nil {
+ offset += p.fastWriteField0(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceExecPlanFragmentResult) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("exec_plan_fragment_result")
+ if p != nil {
+ l += p.field0Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *BackendServiceExecPlanFragmentResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetSuccess() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
+ offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *BackendServiceExecPlanFragmentResult) field0Length() int {
+ l := 0
+ if p.IsSetSuccess() {
+ l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
+ l += p.Success.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentArgs[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := palointernalservice.NewTCancelPlanFragmentParams()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Params = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServiceCancelPlanFragmentArgs) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "cancel_plan_fragment_args")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("cancel_plan_fragment_args")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1)
+ offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceCancelPlanFragmentArgs) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1)
+ l += p.Params.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField0(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentResult[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) FastReadField0(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := palointernalservice.NewTCancelPlanFragmentResult_()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Success = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServiceCancelPlanFragmentResult) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "cancel_plan_fragment_result")
+ if p != nil {
+ offset += p.fastWriteField0(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("cancel_plan_fragment_result")
+ if p != nil {
+ l += p.field0Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetSuccess() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
+ offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *BackendServiceCancelPlanFragmentResult) field0Length() int {
+ l := 0
+ if p.IsSetSuccess() {
+ l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
+ l += p.Success.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *BackendServiceTransmitDataArgs) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataArgs[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceTransmitDataArgs) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := palointernalservice.NewTTransmitDataParams()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Params = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServiceTransmitDataArgs) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServiceTransmitDataArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "transmit_data_args")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceTransmitDataArgs) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("transmit_data_args")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *BackendServiceTransmitDataArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1)
+ offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceTransmitDataArgs) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1)
+ l += p.Params.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *BackendServiceTransmitDataResult) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField0(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataResult[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceTransmitDataResult) FastReadField0(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := palointernalservice.NewTTransmitDataResult_()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Success = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServiceTransmitDataResult) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServiceTransmitDataResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "transmit_data_result")
+ if p != nil {
+ offset += p.fastWriteField0(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceTransmitDataResult) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("transmit_data_result")
+ if p != nil {
+ l += p.field0Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *BackendServiceTransmitDataResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetSuccess() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
+ offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *BackendServiceTransmitDataResult) field0Length() int {
+ l := 0
+ if p.IsSetSuccess() {
+ l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
+ l += p.Success.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *BackendServiceSubmitTasksArgs) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksArgs[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceSubmitTasksArgs) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.Tasks = make([]*agentservice.TAgentTaskRequest, 0, size)
+ for i := 0; i < size; i++ {
+ _elem := agentservice.NewTAgentTaskRequest()
+ if l, err := _elem.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+
+ p.Tasks = append(p.Tasks, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServiceSubmitTasksArgs) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServiceSubmitTasksArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_tasks_args")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceSubmitTasksArgs) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("submit_tasks_args")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *BackendServiceSubmitTasksArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tasks", thrift.LIST, 1)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
+ var length int
+ for _, v := range p.Tasks {
+ length++
+ offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceSubmitTasksArgs) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("tasks", thrift.LIST, 1)
+ l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tasks))
+ for _, v := range p.Tasks {
+ l += v.BLength()
+ }
+ l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *BackendServiceSubmitTasksResult) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField0(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksResult[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceSubmitTasksResult) FastReadField0(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := agentservice.NewTAgentResult_()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Success = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServiceSubmitTasksResult) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServiceSubmitTasksResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_tasks_result")
+ if p != nil {
+ offset += p.fastWriteField0(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceSubmitTasksResult) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("submit_tasks_result")
+ if p != nil {
+ l += p.field0Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *BackendServiceSubmitTasksResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetSuccess() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
+ offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *BackendServiceSubmitTasksResult) field0Length() int {
+ l := 0
+ if p.IsSetSuccess() {
+ l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
+ l += p.Success.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *BackendServiceMakeSnapshotArgs) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotArgs[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceMakeSnapshotArgs) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := agentservice.NewTSnapshotRequest()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.SnapshotRequest = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServiceMakeSnapshotArgs) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServiceMakeSnapshotArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "make_snapshot_args")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceMakeSnapshotArgs) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("make_snapshot_args")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *BackendServiceMakeSnapshotArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "snapshot_request", thrift.STRUCT, 1)
+ offset += p.SnapshotRequest.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceMakeSnapshotArgs) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("snapshot_request", thrift.STRUCT, 1)
+ l += p.SnapshotRequest.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *BackendServiceMakeSnapshotResult) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField0(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotResult[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceMakeSnapshotResult) FastReadField0(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := agentservice.NewTAgentResult_()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Success = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServiceMakeSnapshotResult) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServiceMakeSnapshotResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "make_snapshot_result")
+ if p != nil {
+ offset += p.fastWriteField0(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceMakeSnapshotResult) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("make_snapshot_result")
+ if p != nil {
+ l += p.field0Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *BackendServiceMakeSnapshotResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetSuccess() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
+ offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *BackendServiceMakeSnapshotResult) field0Length() int {
+ l := 0
+ if p.IsSetSuccess() {
+ l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
+ l += p.Success.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *BackendServiceReleaseSnapshotArgs) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotArgs[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceReleaseSnapshotArgs) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+
+ p.SnapshotPath = v
+
+ }
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServiceReleaseSnapshotArgs) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServiceReleaseSnapshotArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "release_snapshot_args")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceReleaseSnapshotArgs) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("release_snapshot_args")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *BackendServiceReleaseSnapshotArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "snapshot_path", thrift.STRING, 1)
+ offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.SnapshotPath)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceReleaseSnapshotArgs) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("snapshot_path", thrift.STRING, 1)
+ l += bthrift.Binary.StringLengthNocopy(p.SnapshotPath)
+
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *BackendServiceReleaseSnapshotResult) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField0(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotResult[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceReleaseSnapshotResult) FastReadField0(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := agentservice.NewTAgentResult_()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Success = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServiceReleaseSnapshotResult) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServiceReleaseSnapshotResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "release_snapshot_result")
+ if p != nil {
+ offset += p.fastWriteField0(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceReleaseSnapshotResult) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("release_snapshot_result")
+ if p != nil {
+ l += p.field0Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *BackendServiceReleaseSnapshotResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetSuccess() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
+ offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *BackendServiceReleaseSnapshotResult) field0Length() int {
+ l := 0
+ if p.IsSetSuccess() {
+ l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
+ l += p.Success.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
+func (p *BackendServicePublishClusterStateArgs) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateArgs[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServicePublishClusterStateArgs) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := agentservice.NewTAgentPublishRequest()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Request = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServicePublishClusterStateArgs) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServicePublishClusterStateArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "publish_cluster_state_args")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServicePublishClusterStateArgs) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("publish_cluster_state_args")
+ if p != nil {
+ l += p.field1Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+
+func (p *BackendServicePublishClusterStateArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1)
+ offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServicePublishClusterStateArgs) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1)
+ l += p.Request.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *BackendServicePublishClusterStateResult) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField0(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateResult[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServicePublishClusterStateResult) FastReadField0(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := agentservice.NewTAgentResult_()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Success = tmp
+ return offset, nil
+}
+
+// for compatibility
+func (p *BackendServicePublishClusterStateResult) FastWrite(buf []byte) int {
+ return 0
+}
+
+func (p *BackendServicePublishClusterStateResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "publish_cluster_state_result")
+ if p != nil {
+ offset += p.fastWriteField0(buf[offset:], binaryWriter)
+ }
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServicePublishClusterStateResult) BLength() int {
+ l := 0
+ l += bthrift.Binary.StructBeginLength("publish_cluster_state_result")
+ if p != nil {
+ l += p.field0Length()
+ }
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
+ return l
+}
+func (p *BackendServicePublishClusterStateResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetSuccess() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
+ offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
+func (p *BackendServicePublishClusterStateResult) field0Length() int {
+ l := 0
+ if p.IsSetSuccess() {
+ l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
+ l += p.Success.BLength()
l += bthrift.Binary.FieldEndLength()
}
- return l
+ return l
+}
+
+func (p *BackendServiceSubmitExportTaskArgs) FastRead(buf []byte) (int, error) {
+ var err error
+ var offset int
+ var l int
+ var fieldTypeId thrift.TType
+ var fieldId int16
+ _, l, err = bthrift.Binary.ReadStructBegin(buf)
+ offset += l
+ if err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+
+ l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadStructEndError
+ }
+
+ return offset, nil
+ReadStructBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskArgs[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *BackendServiceSubmitExportTaskArgs) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := NewTExportTaskRequest()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Request = tmp
+ return offset, nil
}
-func (p *TIngestBinlogRequest) field5Length() int {
- l := 0
- if p.IsSetRemotePort() {
- l += bthrift.Binary.FieldBeginLength("remote_port", thrift.STRING, 5)
- l += bthrift.Binary.StringLengthNocopy(*p.RemotePort)
+// for compatibility
+func (p *BackendServiceSubmitExportTaskArgs) FastWrite(buf []byte) int {
+ return 0
+}
- l += bthrift.Binary.FieldEndLength()
+func (p *BackendServiceSubmitExportTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_export_task_args")
+ if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
- return l
+ offset += bthrift.Binary.WriteFieldStop(buf[offset:])
+ offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ return offset
}
-func (p *TIngestBinlogRequest) field6Length() int {
+func (p *BackendServiceSubmitExportTaskArgs) BLength() int {
l := 0
- if p.IsSetPartitionId() {
- l += bthrift.Binary.FieldBeginLength("partition_id", thrift.I64, 6)
- l += bthrift.Binary.I64Length(*p.PartitionId)
-
- l += bthrift.Binary.FieldEndLength()
+ l += bthrift.Binary.StructBeginLength("submit_export_task_args")
+ if p != nil {
+ l += p.field1Length()
}
+ l += bthrift.Binary.FieldStopLength()
+ l += bthrift.Binary.StructEndLength()
return l
}
-func (p *TIngestBinlogRequest) field7Length() int {
- l := 0
- if p.IsSetLocalTabletId() {
- l += bthrift.Binary.FieldBeginLength("local_tablet_id", thrift.I64, 7)
- l += bthrift.Binary.I64Length(*p.LocalTabletId)
-
- l += bthrift.Binary.FieldEndLength()
- }
- return l
+func (p *BackendServiceSubmitExportTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1)
+ offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
}
-func (p *TIngestBinlogRequest) field8Length() int {
+func (p *BackendServiceSubmitExportTaskArgs) field1Length() int {
l := 0
- if p.IsSetLoadId() {
- l += bthrift.Binary.FieldBeginLength("load_id", thrift.STRUCT, 8)
- l += p.LoadId.BLength()
- l += bthrift.Binary.FieldEndLength()
- }
+ l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1)
+ l += p.Request.BLength()
+ l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *TIngestBinlogResult_) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceSubmitExportTaskResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -4945,9 +13405,9 @@ func (p *TIngestBinlogResult_) FastRead(buf []byte) (int, error) {
break
}
switch fieldId {
- case 1:
+ case 0:
if fieldTypeId == thrift.STRUCT {
- l, err = p.FastReadField1(buf[offset:])
+ l, err = p.FastReadField0(buf[offset:])
offset += l
if err != nil {
goto ReadFieldError
@@ -4985,7 +13445,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TIngestBinlogResult_[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -4994,7 +13454,7 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *TIngestBinlogResult_) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceSubmitExportTaskResult) FastReadField0(buf []byte) (int, error) {
offset := 0
tmp := status.NewTStatus()
@@ -5003,58 +13463,58 @@ func (p *TIngestBinlogResult_) FastReadField1(buf []byte) (int, error) {
} else {
offset += l
}
- p.Status = tmp
+ p.Success = tmp
return offset, nil
}
// for compatibility
-func (p *TIngestBinlogResult_) FastWrite(buf []byte) int {
+func (p *BackendServiceSubmitExportTaskResult) FastWrite(buf []byte) int {
return 0
}
-func (p *TIngestBinlogResult_) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceSubmitExportTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "TIngestBinlogResult")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_export_task_result")
if p != nil {
- offset += p.fastWriteField1(buf[offset:], binaryWriter)
+ offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
return offset
}
-func (p *TIngestBinlogResult_) BLength() int {
+func (p *BackendServiceSubmitExportTaskResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("TIngestBinlogResult")
+ l += bthrift.Binary.StructBeginLength("submit_export_task_result")
if p != nil {
- l += p.field1Length()
+ l += p.field0Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
return l
}
-func (p *TIngestBinlogResult_) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceSubmitExportTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- if p.IsSetStatus() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "status", thrift.STRUCT, 1)
- offset += p.Status.FastWriteNocopy(buf[offset:], binaryWriter)
+ if p.IsSetSuccess() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
+ offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
}
return offset
}
-func (p *TIngestBinlogResult_) field1Length() int {
+func (p *BackendServiceSubmitExportTaskResult) field0Length() int {
l := 0
- if p.IsSetStatus() {
- l += bthrift.Binary.FieldBeginLength("status", thrift.STRUCT, 1)
- l += p.Status.BLength()
+ if p.IsSetSuccess() {
+ l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
+ l += p.Success.BLength()
l += bthrift.Binary.FieldEndLength()
}
return l
}
-func (p *BackendServiceExecPlanFragmentArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetExportStatusArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -5116,7 +13576,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentArgs[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -5125,27 +13585,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceExecPlanFragmentArgs) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceGetExportStatusArgs) FastReadField1(buf []byte) (int, error) {
offset := 0
- tmp := palointernalservice.NewTExecPlanFragmentParams()
+ tmp := types.NewTUniqueId()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
- p.Params = tmp
+ p.TaskId = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceExecPlanFragmentArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceGetExportStatusArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceExecPlanFragmentArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetExportStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "exec_plan_fragment_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_export_status_args")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
@@ -5154,9 +13614,9 @@ func (p *BackendServiceExecPlanFragmentArgs) FastWriteNocopy(buf []byte, binaryW
return offset
}
-func (p *BackendServiceExecPlanFragmentArgs) BLength() int {
+func (p *BackendServiceGetExportStatusArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("exec_plan_fragment_args")
+ l += bthrift.Binary.StructBeginLength("get_export_status_args")
if p != nil {
l += p.field1Length()
}
@@ -5165,23 +13625,23 @@ func (p *BackendServiceExecPlanFragmentArgs) BLength() int {
return l
}
-func (p *BackendServiceExecPlanFragmentArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetExportStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1)
- offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_id", thrift.STRUCT, 1)
+ offset += p.TaskId.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServiceExecPlanFragmentArgs) field1Length() int {
+func (p *BackendServiceGetExportStatusArgs) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1)
- l += p.Params.BLength()
+ l += bthrift.Binary.FieldBeginLength("task_id", thrift.STRUCT, 1)
+ l += p.TaskId.BLength()
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServiceExecPlanFragmentResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetExportStatusResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -5243,7 +13703,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceExecPlanFragmentResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -5252,10 +13712,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceExecPlanFragmentResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceGetExportStatusResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := palointernalservice.NewTExecPlanFragmentResult_()
+ tmp := palointernalservice.NewTExportStatusResult_()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -5266,13 +13726,13 @@ func (p *BackendServiceExecPlanFragmentResult) FastReadField0(buf []byte) (int,
}
// for compatibility
-func (p *BackendServiceExecPlanFragmentResult) FastWrite(buf []byte) int {
+func (p *BackendServiceGetExportStatusResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceExecPlanFragmentResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetExportStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "exec_plan_fragment_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_export_status_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -5281,9 +13741,9 @@ func (p *BackendServiceExecPlanFragmentResult) FastWriteNocopy(buf []byte, binar
return offset
}
-func (p *BackendServiceExecPlanFragmentResult) BLength() int {
+func (p *BackendServiceGetExportStatusResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("exec_plan_fragment_result")
+ l += bthrift.Binary.StructBeginLength("get_export_status_result")
if p != nil {
l += p.field0Length()
}
@@ -5292,7 +13752,7 @@ func (p *BackendServiceExecPlanFragmentResult) BLength() int {
return l
}
-func (p *BackendServiceExecPlanFragmentResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetExportStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -5302,7 +13762,7 @@ func (p *BackendServiceExecPlanFragmentResult) fastWriteField0(buf []byte, binar
return offset
}
-func (p *BackendServiceExecPlanFragmentResult) field0Length() int {
+func (p *BackendServiceGetExportStatusResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -5312,7 +13772,7 @@ func (p *BackendServiceExecPlanFragmentResult) field0Length() int {
return l
}
-func (p *BackendServiceCancelPlanFragmentArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceEraseExportTaskArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -5374,7 +13834,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentArgs[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -5383,27 +13843,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceCancelPlanFragmentArgs) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceEraseExportTaskArgs) FastReadField1(buf []byte) (int, error) {
offset := 0
- tmp := palointernalservice.NewTCancelPlanFragmentParams()
+ tmp := types.NewTUniqueId()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
- p.Params = tmp
+ p.TaskId = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceCancelPlanFragmentArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceEraseExportTaskArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceCancelPlanFragmentArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceEraseExportTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "cancel_plan_fragment_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "erase_export_task_args")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
@@ -5412,9 +13872,9 @@ func (p *BackendServiceCancelPlanFragmentArgs) FastWriteNocopy(buf []byte, binar
return offset
}
-func (p *BackendServiceCancelPlanFragmentArgs) BLength() int {
+func (p *BackendServiceEraseExportTaskArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("cancel_plan_fragment_args")
+ l += bthrift.Binary.StructBeginLength("erase_export_task_args")
if p != nil {
l += p.field1Length()
}
@@ -5423,23 +13883,23 @@ func (p *BackendServiceCancelPlanFragmentArgs) BLength() int {
return l
}
-func (p *BackendServiceCancelPlanFragmentArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceEraseExportTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1)
- offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_id", thrift.STRUCT, 1)
+ offset += p.TaskId.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServiceCancelPlanFragmentArgs) field1Length() int {
+func (p *BackendServiceEraseExportTaskArgs) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1)
- l += p.Params.BLength()
+ l += bthrift.Binary.FieldBeginLength("task_id", thrift.STRUCT, 1)
+ l += p.TaskId.BLength()
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServiceCancelPlanFragmentResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceEraseExportTaskResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -5501,7 +13961,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCancelPlanFragmentResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -5510,10 +13970,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceCancelPlanFragmentResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceEraseExportTaskResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := palointernalservice.NewTCancelPlanFragmentResult_()
+ tmp := status.NewTStatus()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -5524,13 +13984,13 @@ func (p *BackendServiceCancelPlanFragmentResult) FastReadField0(buf []byte) (int
}
// for compatibility
-func (p *BackendServiceCancelPlanFragmentResult) FastWrite(buf []byte) int {
+func (p *BackendServiceEraseExportTaskResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceCancelPlanFragmentResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceEraseExportTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "cancel_plan_fragment_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "erase_export_task_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -5539,9 +13999,9 @@ func (p *BackendServiceCancelPlanFragmentResult) FastWriteNocopy(buf []byte, bin
return offset
}
-func (p *BackendServiceCancelPlanFragmentResult) BLength() int {
+func (p *BackendServiceEraseExportTaskResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("cancel_plan_fragment_result")
+ l += bthrift.Binary.StructBeginLength("erase_export_task_result")
if p != nil {
l += p.field0Length()
}
@@ -5550,7 +14010,7 @@ func (p *BackendServiceCancelPlanFragmentResult) BLength() int {
return l
}
-func (p *BackendServiceCancelPlanFragmentResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceEraseExportTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -5560,7 +14020,7 @@ func (p *BackendServiceCancelPlanFragmentResult) fastWriteField0(buf []byte, bin
return offset
}
-func (p *BackendServiceCancelPlanFragmentResult) field0Length() int {
+func (p *BackendServiceEraseExportTaskResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -5570,7 +14030,7 @@ func (p *BackendServiceCancelPlanFragmentResult) field0Length() int {
return l
}
-func (p *BackendServiceTransmitDataArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetTabletStatArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -5591,27 +14051,10 @@ func (p *BackendServiceTransmitDataArgs) FastRead(buf []byte) (int, error) {
if fieldTypeId == thrift.STOP {
break
}
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRUCT {
- l, err = p.FastReadField1(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldError
- }
- } else {
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
- }
- default:
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
}
l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
@@ -5631,73 +14074,40 @@ ReadStructBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
-ReadFieldEndError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
-ReadStructEndError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-}
-
-func (p *BackendServiceTransmitDataArgs) FastReadField1(buf []byte) (int, error) {
- offset := 0
-
- tmp := palointernalservice.NewTTransmitDataParams()
- if l, err := tmp.FastRead(buf[offset:]); err != nil {
- return offset, err
- } else {
- offset += l
- }
- p.Params = tmp
- return offset, nil
+ReadFieldEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
// for compatibility
-func (p *BackendServiceTransmitDataArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceGetTabletStatArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceTransmitDataArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetTabletStatArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "transmit_data_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_tablet_stat_args")
if p != nil {
- offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
return offset
}
-func (p *BackendServiceTransmitDataArgs) BLength() int {
+func (p *BackendServiceGetTabletStatArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("transmit_data_args")
+ l += bthrift.Binary.StructBeginLength("get_tablet_stat_args")
if p != nil {
- l += p.field1Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
return l
}
-func (p *BackendServiceTransmitDataArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
- offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1)
- offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter)
- offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
- return offset
-}
-
-func (p *BackendServiceTransmitDataArgs) field1Length() int {
- l := 0
- l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1)
- l += p.Params.BLength()
- l += bthrift.Binary.FieldEndLength()
- return l
-}
-
-func (p *BackendServiceTransmitDataResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetTabletStatResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -5759,7 +14169,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceTransmitDataResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTabletStatResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -5768,10 +14178,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceTransmitDataResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceGetTabletStatResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := palointernalservice.NewTTransmitDataResult_()
+ tmp := NewTTabletStatResult_()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -5782,13 +14192,13 @@ func (p *BackendServiceTransmitDataResult) FastReadField0(buf []byte) (int, erro
}
// for compatibility
-func (p *BackendServiceTransmitDataResult) FastWrite(buf []byte) int {
+func (p *BackendServiceGetTabletStatResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceTransmitDataResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetTabletStatResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "transmit_data_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_tablet_stat_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -5797,9 +14207,9 @@ func (p *BackendServiceTransmitDataResult) FastWriteNocopy(buf []byte, binaryWri
return offset
}
-func (p *BackendServiceTransmitDataResult) BLength() int {
+func (p *BackendServiceGetTabletStatResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("transmit_data_result")
+ l += bthrift.Binary.StructBeginLength("get_tablet_stat_result")
if p != nil {
l += p.field0Length()
}
@@ -5808,7 +14218,7 @@ func (p *BackendServiceTransmitDataResult) BLength() int {
return l
}
-func (p *BackendServiceTransmitDataResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetTabletStatResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -5818,7 +14228,7 @@ func (p *BackendServiceTransmitDataResult) fastWriteField0(buf []byte, binaryWri
return offset
}
-func (p *BackendServiceTransmitDataResult) field0Length() int {
+func (p *BackendServiceGetTabletStatResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -5828,7 +14238,7 @@ func (p *BackendServiceTransmitDataResult) field0Length() int {
return l
}
-func (p *BackendServiceSubmitTasksArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetTrashUsedCapacityArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -5849,27 +14259,10 @@ func (p *BackendServiceSubmitTasksArgs) FastRead(buf []byte) (int, error) {
if fieldTypeId == thrift.STOP {
break
}
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.LIST {
- l, err = p.FastReadField1(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldError
- }
- } else {
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
- }
- default:
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
}
l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
@@ -5889,8 +14282,6 @@ ReadStructBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -5899,89 +14290,32 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceSubmitTasksArgs) FastReadField1(buf []byte) (int, error) {
- offset := 0
-
- _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
- offset += l
- if err != nil {
- return offset, err
- }
- p.Tasks = make([]*agentservice.TAgentTaskRequest, 0, size)
- for i := 0; i < size; i++ {
- _elem := agentservice.NewTAgentTaskRequest()
- if l, err := _elem.FastRead(buf[offset:]); err != nil {
- return offset, err
- } else {
- offset += l
- }
-
- p.Tasks = append(p.Tasks, _elem)
- }
- if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
- return offset, err
- } else {
- offset += l
- }
- return offset, nil
-}
-
// for compatibility
-func (p *BackendServiceSubmitTasksArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceGetTrashUsedCapacityArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceSubmitTasksArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetTrashUsedCapacityArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_tasks_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_trash_used_capacity_args")
if p != nil {
- offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
return offset
}
-func (p *BackendServiceSubmitTasksArgs) BLength() int {
+func (p *BackendServiceGetTrashUsedCapacityArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("submit_tasks_args")
+ l += bthrift.Binary.StructBeginLength("get_trash_used_capacity_args")
if p != nil {
- l += p.field1Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
return l
}
-func (p *BackendServiceSubmitTasksArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
- offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tasks", thrift.LIST, 1)
- listBeginOffset := offset
- offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
- var length int
- for _, v := range p.Tasks {
- length++
- offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
- }
- bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length)
- offset += bthrift.Binary.WriteListEnd(buf[offset:])
- offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
- return offset
-}
-
-func (p *BackendServiceSubmitTasksArgs) field1Length() int {
- l := 0
- l += bthrift.Binary.FieldBeginLength("tasks", thrift.LIST, 1)
- l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tasks))
- for _, v := range p.Tasks {
- l += v.BLength()
- }
- l += bthrift.Binary.ListEndLength()
- l += bthrift.Binary.FieldEndLength()
- return l
-}
-
-func (p *BackendServiceSubmitTasksResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetTrashUsedCapacityResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -6004,7 +14338,7 @@ func (p *BackendServiceSubmitTasksResult) FastRead(buf []byte) (int, error) {
}
switch fieldId {
case 0:
- if fieldTypeId == thrift.STRUCT {
+ if fieldTypeId == thrift.I64 {
l, err = p.FastReadField0(buf[offset:])
offset += l
if err != nil {
@@ -6043,7 +14377,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitTasksResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTrashUsedCapacityResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -6052,27 +14386,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceSubmitTasksResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceGetTrashUsedCapacityResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := agentservice.NewTAgentResult_()
- if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
+ p.Success = &v
+
}
- p.Success = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceSubmitTasksResult) FastWrite(buf []byte) int {
+func (p *BackendServiceGetTrashUsedCapacityResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceSubmitTasksResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetTrashUsedCapacityResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_tasks_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_trash_used_capacity_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -6081,9 +14415,9 @@ func (p *BackendServiceSubmitTasksResult) FastWriteNocopy(buf []byte, binaryWrit
return offset
}
-func (p *BackendServiceSubmitTasksResult) BLength() int {
+func (p *BackendServiceGetTrashUsedCapacityResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("submit_tasks_result")
+ l += bthrift.Binary.StructBeginLength("get_trash_used_capacity_result")
if p != nil {
l += p.field0Length()
}
@@ -6092,27 +14426,29 @@ func (p *BackendServiceSubmitTasksResult) BLength() int {
return l
}
-func (p *BackendServiceSubmitTasksResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetTrashUsedCapacityResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
- offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.I64, 0)
+ offset += bthrift.Binary.WriteI64(buf[offset:], *p.Success)
+
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
}
return offset
}
-func (p *BackendServiceSubmitTasksResult) field0Length() int {
+func (p *BackendServiceGetTrashUsedCapacityResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
- l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
- l += p.Success.BLength()
+ l += bthrift.Binary.FieldBeginLength("success", thrift.I64, 0)
+ l += bthrift.Binary.I64Length(*p.Success)
+
l += bthrift.Binary.FieldEndLength()
}
return l
}
-func (p *BackendServiceMakeSnapshotArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -6133,27 +14469,10 @@ func (p *BackendServiceMakeSnapshotArgs) FastRead(buf []byte) (int, error) {
if fieldTypeId == thrift.STOP {
break
}
- switch fieldId {
- case 1:
- if fieldTypeId == thrift.STRUCT {
- l, err = p.FastReadField1(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldError
- }
- } else {
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
- }
- default:
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldError
- }
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
}
l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
@@ -6173,8 +14492,6 @@ ReadStructBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -6183,63 +14500,32 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceMakeSnapshotArgs) FastReadField1(buf []byte) (int, error) {
- offset := 0
-
- tmp := agentservice.NewTSnapshotRequest()
- if l, err := tmp.FastRead(buf[offset:]); err != nil {
- return offset, err
- } else {
- offset += l
- }
- p.SnapshotRequest = tmp
- return offset, nil
-}
-
// for compatibility
-func (p *BackendServiceMakeSnapshotArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceMakeSnapshotArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "make_snapshot_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_disk_trash_used_capacity_args")
if p != nil {
- offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
return offset
}
-func (p *BackendServiceMakeSnapshotArgs) BLength() int {
+func (p *BackendServiceGetDiskTrashUsedCapacityArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("make_snapshot_args")
+ l += bthrift.Binary.StructBeginLength("get_disk_trash_used_capacity_args")
if p != nil {
- l += p.field1Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
return l
}
-func (p *BackendServiceMakeSnapshotArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
- offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "snapshot_request", thrift.STRUCT, 1)
- offset += p.SnapshotRequest.FastWriteNocopy(buf[offset:], binaryWriter)
- offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
- return offset
-}
-
-func (p *BackendServiceMakeSnapshotArgs) field1Length() int {
- l := 0
- l += bthrift.Binary.FieldBeginLength("snapshot_request", thrift.STRUCT, 1)
- l += p.SnapshotRequest.BLength()
- l += bthrift.Binary.FieldEndLength()
- return l
-}
-
-func (p *BackendServiceMakeSnapshotResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -6262,7 +14548,7 @@ func (p *BackendServiceMakeSnapshotResult) FastRead(buf []byte) (int, error) {
}
switch fieldId {
case 0:
- if fieldTypeId == thrift.STRUCT {
+ if fieldTypeId == thrift.LIST {
l, err = p.FastReadField0(buf[offset:])
offset += l
if err != nil {
@@ -6301,7 +14587,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceMakeSnapshotResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetDiskTrashUsedCapacityResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -6310,27 +14596,41 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceMakeSnapshotResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := agentservice.NewTAgentResult_()
- if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.Success = make([]*TDiskTrashInfo, 0, size)
+ for i := 0; i < size; i++ {
+ _elem := NewTDiskTrashInfo()
+ if l, err := _elem.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+
+ p.Success = append(p.Success, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
- p.Success = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceMakeSnapshotResult) FastWrite(buf []byte) int {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceMakeSnapshotResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "make_snapshot_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_disk_trash_used_capacity_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -6339,9 +14639,9 @@ func (p *BackendServiceMakeSnapshotResult) FastWriteNocopy(buf []byte, binaryWri
return offset
}
-func (p *BackendServiceMakeSnapshotResult) BLength() int {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("make_snapshot_result")
+ l += bthrift.Binary.StructBeginLength("get_disk_trash_used_capacity_result")
if p != nil {
l += p.field0Length()
}
@@ -6350,27 +14650,39 @@ func (p *BackendServiceMakeSnapshotResult) BLength() int {
return l
}
-func (p *BackendServiceMakeSnapshotResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
- offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.LIST, 0)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
+ var length int
+ for _, v := range p.Success {
+ length++
+ offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
}
return offset
}
-func (p *BackendServiceMakeSnapshotResult) field0Length() int {
+func (p *BackendServiceGetDiskTrashUsedCapacityResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
- l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
- l += p.Success.BLength()
+ l += bthrift.Binary.FieldBeginLength("success", thrift.LIST, 0)
+ l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Success))
+ for _, v := range p.Success {
+ l += v.BLength()
+ }
+ l += bthrift.Binary.ListEndLength()
l += bthrift.Binary.FieldEndLength()
}
return l
}
-func (p *BackendServiceReleaseSnapshotArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -6393,7 +14705,7 @@ func (p *BackendServiceReleaseSnapshotArgs) FastRead(buf []byte) (int, error) {
}
switch fieldId {
case 1:
- if fieldTypeId == thrift.STRING {
+ if fieldTypeId == thrift.LIST {
l, err = p.FastReadField1(buf[offset:])
offset += l
if err != nil {
@@ -6432,7 +14744,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotArgs[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -6441,28 +14753,41 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceReleaseSnapshotArgs) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastReadField1(buf []byte) (int, error) {
offset := 0
- if v, l, err := bthrift.Binary.ReadString(buf[offset:]); err != nil {
+ _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
+ offset += l
+ if err != nil {
+ return offset, err
+ }
+ p.Tasks = make([]*TRoutineLoadTask, 0, size)
+ for i := 0; i < size; i++ {
+ _elem := NewTRoutineLoadTask()
+ if l, err := _elem.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+
+ p.Tasks = append(p.Tasks, _elem)
+ }
+ if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
-
- p.SnapshotPath = v
-
}
return offset, nil
}
// for compatibility
-func (p *BackendServiceReleaseSnapshotArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceReleaseSnapshotArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "release_snapshot_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_routine_load_task_args")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
@@ -6471,9 +14796,9 @@ func (p *BackendServiceReleaseSnapshotArgs) FastWriteNocopy(buf []byte, binaryWr
return offset
}
-func (p *BackendServiceReleaseSnapshotArgs) BLength() int {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("release_snapshot_args")
+ l += bthrift.Binary.StructBeginLength("submit_routine_load_task_args")
if p != nil {
l += p.field1Length()
}
@@ -6482,25 +14807,35 @@ func (p *BackendServiceReleaseSnapshotArgs) BLength() int {
return l
}
-func (p *BackendServiceReleaseSnapshotArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "snapshot_path", thrift.STRING, 1)
- offset += bthrift.Binary.WriteStringNocopy(buf[offset:], binaryWriter, p.SnapshotPath)
-
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tasks", thrift.LIST, 1)
+ listBeginOffset := offset
+ offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
+ var length int
+ for _, v := range p.Tasks {
+ length++
+ offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
+ }
+ bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length)
+ offset += bthrift.Binary.WriteListEnd(buf[offset:])
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServiceReleaseSnapshotArgs) field1Length() int {
+func (p *BackendServiceSubmitRoutineLoadTaskArgs) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("snapshot_path", thrift.STRING, 1)
- l += bthrift.Binary.StringLengthNocopy(p.SnapshotPath)
-
+ l += bthrift.Binary.FieldBeginLength("tasks", thrift.LIST, 1)
+ l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tasks))
+ for _, v := range p.Tasks {
+ l += v.BLength()
+ }
+ l += bthrift.Binary.ListEndLength()
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServiceReleaseSnapshotResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -6562,7 +14897,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceReleaseSnapshotResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -6571,10 +14906,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceReleaseSnapshotResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := agentservice.NewTAgentResult_()
+ tmp := status.NewTStatus()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -6585,13 +14920,13 @@ func (p *BackendServiceReleaseSnapshotResult) FastReadField0(buf []byte) (int, e
}
// for compatibility
-func (p *BackendServiceReleaseSnapshotResult) FastWrite(buf []byte) int {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceReleaseSnapshotResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "release_snapshot_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_routine_load_task_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -6600,9 +14935,9 @@ func (p *BackendServiceReleaseSnapshotResult) FastWriteNocopy(buf []byte, binary
return offset
}
-func (p *BackendServiceReleaseSnapshotResult) BLength() int {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("release_snapshot_result")
+ l += bthrift.Binary.StructBeginLength("submit_routine_load_task_result")
if p != nil {
l += p.field0Length()
}
@@ -6611,7 +14946,7 @@ func (p *BackendServiceReleaseSnapshotResult) BLength() int {
return l
}
-func (p *BackendServiceReleaseSnapshotResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -6621,7 +14956,7 @@ func (p *BackendServiceReleaseSnapshotResult) fastWriteField0(buf []byte, binary
return offset
}
-func (p *BackendServiceReleaseSnapshotResult) field0Length() int {
+func (p *BackendServiceSubmitRoutineLoadTaskResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -6631,7 +14966,7 @@ func (p *BackendServiceReleaseSnapshotResult) field0Length() int {
return l
}
-func (p *BackendServicePublishClusterStateArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceOpenScannerArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -6693,7 +15028,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateArgs[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -6702,27 +15037,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServicePublishClusterStateArgs) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceOpenScannerArgs) FastReadField1(buf []byte) (int, error) {
offset := 0
- tmp := agentservice.NewTAgentPublishRequest()
+ tmp := dorisexternalservice.NewTScanOpenParams()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
- p.Request = tmp
+ p.Params = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServicePublishClusterStateArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceOpenScannerArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServicePublishClusterStateArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceOpenScannerArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "publish_cluster_state_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "open_scanner_args")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
@@ -6731,9 +15066,9 @@ func (p *BackendServicePublishClusterStateArgs) FastWriteNocopy(buf []byte, bina
return offset
}
-func (p *BackendServicePublishClusterStateArgs) BLength() int {
+func (p *BackendServiceOpenScannerArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("publish_cluster_state_args")
+ l += bthrift.Binary.StructBeginLength("open_scanner_args")
if p != nil {
l += p.field1Length()
}
@@ -6742,23 +15077,23 @@ func (p *BackendServicePublishClusterStateArgs) BLength() int {
return l
}
-func (p *BackendServicePublishClusterStateArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceOpenScannerArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1)
- offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1)
+ offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServicePublishClusterStateArgs) field1Length() int {
+func (p *BackendServiceOpenScannerArgs) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1)
- l += p.Request.BLength()
+ l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1)
+ l += p.Params.BLength()
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServicePublishClusterStateResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceOpenScannerResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -6820,7 +15155,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishClusterStateResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -6829,10 +15164,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServicePublishClusterStateResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceOpenScannerResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := agentservice.NewTAgentResult_()
+ tmp := dorisexternalservice.NewTScanOpenResult_()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -6843,13 +15178,13 @@ func (p *BackendServicePublishClusterStateResult) FastReadField0(buf []byte) (in
}
// for compatibility
-func (p *BackendServicePublishClusterStateResult) FastWrite(buf []byte) int {
+func (p *BackendServiceOpenScannerResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServicePublishClusterStateResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceOpenScannerResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "publish_cluster_state_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "open_scanner_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -6858,9 +15193,9 @@ func (p *BackendServicePublishClusterStateResult) FastWriteNocopy(buf []byte, bi
return offset
}
-func (p *BackendServicePublishClusterStateResult) BLength() int {
+func (p *BackendServiceOpenScannerResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("publish_cluster_state_result")
+ l += bthrift.Binary.StructBeginLength("open_scanner_result")
if p != nil {
l += p.field0Length()
}
@@ -6869,7 +15204,7 @@ func (p *BackendServicePublishClusterStateResult) BLength() int {
return l
}
-func (p *BackendServicePublishClusterStateResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceOpenScannerResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -6879,7 +15214,7 @@ func (p *BackendServicePublishClusterStateResult) fastWriteField0(buf []byte, bi
return offset
}
-func (p *BackendServicePublishClusterStateResult) field0Length() int {
+func (p *BackendServiceOpenScannerResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -6889,7 +15224,7 @@ func (p *BackendServicePublishClusterStateResult) field0Length() int {
return l
}
-func (p *BackendServiceSubmitExportTaskArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetNextArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -6951,7 +15286,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskArgs[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -6960,27 +15295,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceSubmitExportTaskArgs) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceGetNextArgs) FastReadField1(buf []byte) (int, error) {
offset := 0
- tmp := NewTExportTaskRequest()
+ tmp := dorisexternalservice.NewTScanNextBatchParams()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
- p.Request = tmp
+ p.Params = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceSubmitExportTaskArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceGetNextArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceSubmitExportTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetNextArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_export_task_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_next_args")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
@@ -6989,9 +15324,9 @@ func (p *BackendServiceSubmitExportTaskArgs) FastWriteNocopy(buf []byte, binaryW
return offset
}
-func (p *BackendServiceSubmitExportTaskArgs) BLength() int {
+func (p *BackendServiceGetNextArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("submit_export_task_args")
+ l += bthrift.Binary.StructBeginLength("get_next_args")
if p != nil {
l += p.field1Length()
}
@@ -7000,23 +15335,23 @@ func (p *BackendServiceSubmitExportTaskArgs) BLength() int {
return l
}
-func (p *BackendServiceSubmitExportTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetNextArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1)
- offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1)
+ offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServiceSubmitExportTaskArgs) field1Length() int {
+func (p *BackendServiceGetNextArgs) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1)
- l += p.Request.BLength()
+ l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1)
+ l += p.Params.BLength()
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServiceSubmitExportTaskResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetNextResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -7078,7 +15413,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitExportTaskResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -7087,10 +15422,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceSubmitExportTaskResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceGetNextResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := status.NewTStatus()
+ tmp := dorisexternalservice.NewTScanBatchResult_()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -7101,13 +15436,13 @@ func (p *BackendServiceSubmitExportTaskResult) FastReadField0(buf []byte) (int,
}
// for compatibility
-func (p *BackendServiceSubmitExportTaskResult) FastWrite(buf []byte) int {
+func (p *BackendServiceGetNextResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceSubmitExportTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetNextResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_export_task_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_next_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -7116,9 +15451,9 @@ func (p *BackendServiceSubmitExportTaskResult) FastWriteNocopy(buf []byte, binar
return offset
}
-func (p *BackendServiceSubmitExportTaskResult) BLength() int {
+func (p *BackendServiceGetNextResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("submit_export_task_result")
+ l += bthrift.Binary.StructBeginLength("get_next_result")
if p != nil {
l += p.field0Length()
}
@@ -7127,7 +15462,7 @@ func (p *BackendServiceSubmitExportTaskResult) BLength() int {
return l
}
-func (p *BackendServiceSubmitExportTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetNextResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -7137,7 +15472,7 @@ func (p *BackendServiceSubmitExportTaskResult) fastWriteField0(buf []byte, binar
return offset
}
-func (p *BackendServiceSubmitExportTaskResult) field0Length() int {
+func (p *BackendServiceGetNextResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -7147,7 +15482,7 @@ func (p *BackendServiceSubmitExportTaskResult) field0Length() int {
return l
}
-func (p *BackendServiceGetExportStatusArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceCloseScannerArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -7209,7 +15544,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusArgs[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -7218,27 +15553,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetExportStatusArgs) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceCloseScannerArgs) FastReadField1(buf []byte) (int, error) {
offset := 0
- tmp := types.NewTUniqueId()
+ tmp := dorisexternalservice.NewTScanCloseParams()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
- p.TaskId = tmp
+ p.Params = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceGetExportStatusArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceCloseScannerArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceGetExportStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceCloseScannerArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_export_status_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "close_scanner_args")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
@@ -7247,9 +15582,9 @@ func (p *BackendServiceGetExportStatusArgs) FastWriteNocopy(buf []byte, binaryWr
return offset
}
-func (p *BackendServiceGetExportStatusArgs) BLength() int {
+func (p *BackendServiceCloseScannerArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("get_export_status_args")
+ l += bthrift.Binary.StructBeginLength("close_scanner_args")
if p != nil {
l += p.field1Length()
}
@@ -7258,23 +15593,23 @@ func (p *BackendServiceGetExportStatusArgs) BLength() int {
return l
}
-func (p *BackendServiceGetExportStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceCloseScannerArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_id", thrift.STRUCT, 1)
- offset += p.TaskId.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1)
+ offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServiceGetExportStatusArgs) field1Length() int {
+func (p *BackendServiceCloseScannerArgs) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("task_id", thrift.STRUCT, 1)
- l += p.TaskId.BLength()
+ l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1)
+ l += p.Params.BLength()
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServiceGetExportStatusResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceCloseScannerResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -7336,7 +15671,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetExportStatusResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -7345,10 +15680,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetExportStatusResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceCloseScannerResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := palointernalservice.NewTExportStatusResult_()
+ tmp := dorisexternalservice.NewTScanCloseResult_()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -7359,13 +15694,13 @@ func (p *BackendServiceGetExportStatusResult) FastReadField0(buf []byte) (int, e
}
// for compatibility
-func (p *BackendServiceGetExportStatusResult) FastWrite(buf []byte) int {
+func (p *BackendServiceCloseScannerResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceGetExportStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceCloseScannerResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_export_status_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "close_scanner_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -7374,9 +15709,9 @@ func (p *BackendServiceGetExportStatusResult) FastWriteNocopy(buf []byte, binary
return offset
}
-func (p *BackendServiceGetExportStatusResult) BLength() int {
+func (p *BackendServiceCloseScannerResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("get_export_status_result")
+ l += bthrift.Binary.StructBeginLength("close_scanner_result")
if p != nil {
l += p.field0Length()
}
@@ -7385,7 +15720,7 @@ func (p *BackendServiceGetExportStatusResult) BLength() int {
return l
}
-func (p *BackendServiceGetExportStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceCloseScannerResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -7395,7 +15730,7 @@ func (p *BackendServiceGetExportStatusResult) fastWriteField0(buf []byte, binary
return offset
}
-func (p *BackendServiceGetExportStatusResult) field0Length() int {
+func (p *BackendServiceCloseScannerResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -7405,7 +15740,7 @@ func (p *BackendServiceGetExportStatusResult) field0Length() int {
return l
}
-func (p *BackendServiceEraseExportTaskArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetStreamLoadRecordArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -7428,7 +15763,7 @@ func (p *BackendServiceEraseExportTaskArgs) FastRead(buf []byte) (int, error) {
}
switch fieldId {
case 1:
- if fieldTypeId == thrift.STRUCT {
+ if fieldTypeId == thrift.I64 {
l, err = p.FastReadField1(buf[offset:])
offset += l
if err != nil {
@@ -7467,7 +15802,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskArgs[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -7476,27 +15811,28 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceEraseExportTaskArgs) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceGetStreamLoadRecordArgs) FastReadField1(buf []byte) (int, error) {
offset := 0
- tmp := types.NewTUniqueId()
- if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
+
+ p.LastStreamRecordTime = v
+
}
- p.TaskId = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceEraseExportTaskArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceGetStreamLoadRecordArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceEraseExportTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetStreamLoadRecordArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "erase_export_task_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_stream_load_record_args")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
@@ -7505,9 +15841,9 @@ func (p *BackendServiceEraseExportTaskArgs) FastWriteNocopy(buf []byte, binaryWr
return offset
}
-func (p *BackendServiceEraseExportTaskArgs) BLength() int {
+func (p *BackendServiceGetStreamLoadRecordArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("erase_export_task_args")
+ l += bthrift.Binary.StructBeginLength("get_stream_load_record_args")
if p != nil {
l += p.field1Length()
}
@@ -7516,23 +15852,25 @@ func (p *BackendServiceEraseExportTaskArgs) BLength() int {
return l
}
-func (p *BackendServiceEraseExportTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetStreamLoadRecordArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "task_id", thrift.STRUCT, 1)
- offset += p.TaskId.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "last_stream_record_time", thrift.I64, 1)
+ offset += bthrift.Binary.WriteI64(buf[offset:], p.LastStreamRecordTime)
+
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServiceEraseExportTaskArgs) field1Length() int {
+func (p *BackendServiceGetStreamLoadRecordArgs) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("task_id", thrift.STRUCT, 1)
- l += p.TaskId.BLength()
+ l += bthrift.Binary.FieldBeginLength("last_stream_record_time", thrift.I64, 1)
+ l += bthrift.Binary.I64Length(p.LastStreamRecordTime)
+
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServiceEraseExportTaskResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetStreamLoadRecordResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -7594,7 +15932,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceEraseExportTaskResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -7603,10 +15941,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceEraseExportTaskResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceGetStreamLoadRecordResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := status.NewTStatus()
+ tmp := NewTStreamLoadRecordResult_()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -7617,13 +15955,13 @@ func (p *BackendServiceEraseExportTaskResult) FastReadField0(buf []byte) (int, e
}
// for compatibility
-func (p *BackendServiceEraseExportTaskResult) FastWrite(buf []byte) int {
+func (p *BackendServiceGetStreamLoadRecordResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceEraseExportTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetStreamLoadRecordResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "erase_export_task_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_stream_load_record_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -7632,9 +15970,9 @@ func (p *BackendServiceEraseExportTaskResult) FastWriteNocopy(buf []byte, binary
return offset
}
-func (p *BackendServiceEraseExportTaskResult) BLength() int {
+func (p *BackendServiceGetStreamLoadRecordResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("erase_export_task_result")
+ l += bthrift.Binary.StructBeginLength("get_stream_load_record_result")
if p != nil {
l += p.field0Length()
}
@@ -7643,7 +15981,7 @@ func (p *BackendServiceEraseExportTaskResult) BLength() int {
return l
}
-func (p *BackendServiceEraseExportTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetStreamLoadRecordResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -7653,7 +15991,7 @@ func (p *BackendServiceEraseExportTaskResult) fastWriteField0(buf []byte, binary
return offset
}
-func (p *BackendServiceEraseExportTaskResult) field0Length() int {
+func (p *BackendServiceGetStreamLoadRecordResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -7663,7 +16001,7 @@ func (p *BackendServiceEraseExportTaskResult) field0Length() int {
return l
}
-func (p *BackendServiceGetTabletStatArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceCheckStorageFormatArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -7687,7 +16025,7 @@ func (p *BackendServiceGetTabletStatArgs) FastRead(buf []byte) (int, error) {
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
if err != nil {
- goto SkipFieldTypeError
+ goto SkipFieldError
}
l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
@@ -7707,9 +16045,8 @@ ReadStructBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-
-SkipFieldTypeError:
- return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
ReadStructEndError:
@@ -7717,13 +16054,13 @@ ReadStructEndError:
}
// for compatibility
-func (p *BackendServiceGetTabletStatArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceCheckStorageFormatArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceGetTabletStatArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceCheckStorageFormatArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_tablet_stat_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "check_storage_format_args")
if p != nil {
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
@@ -7731,9 +16068,9 @@ func (p *BackendServiceGetTabletStatArgs) FastWriteNocopy(buf []byte, binaryWrit
return offset
}
-func (p *BackendServiceGetTabletStatArgs) BLength() int {
+func (p *BackendServiceCheckStorageFormatArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("get_tablet_stat_args")
+ l += bthrift.Binary.StructBeginLength("check_storage_format_args")
if p != nil {
}
l += bthrift.Binary.FieldStopLength()
@@ -7741,7 +16078,7 @@ func (p *BackendServiceGetTabletStatArgs) BLength() int {
return l
}
-func (p *BackendServiceGetTabletStatResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceCheckStorageFormatResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -7803,7 +16140,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTabletStatResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckStorageFormatResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -7812,10 +16149,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetTabletStatResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceCheckStorageFormatResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := NewTTabletStatResult_()
+ tmp := NewTCheckStorageFormatResult_()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -7826,13 +16163,13 @@ func (p *BackendServiceGetTabletStatResult) FastReadField0(buf []byte) (int, err
}
// for compatibility
-func (p *BackendServiceGetTabletStatResult) FastWrite(buf []byte) int {
+func (p *BackendServiceCheckStorageFormatResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceGetTabletStatResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceCheckStorageFormatResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_tablet_stat_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "check_storage_format_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -7841,9 +16178,9 @@ func (p *BackendServiceGetTabletStatResult) FastWriteNocopy(buf []byte, binaryWr
return offset
}
-func (p *BackendServiceGetTabletStatResult) BLength() int {
+func (p *BackendServiceCheckStorageFormatResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("get_tablet_stat_result")
+ l += bthrift.Binary.StructBeginLength("check_storage_format_result")
if p != nil {
l += p.field0Length()
}
@@ -7852,7 +16189,7 @@ func (p *BackendServiceGetTabletStatResult) BLength() int {
return l
}
-func (p *BackendServiceGetTabletStatResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceCheckStorageFormatResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -7862,7 +16199,7 @@ func (p *BackendServiceGetTabletStatResult) fastWriteField0(buf []byte, binaryWr
return offset
}
-func (p *BackendServiceGetTabletStatResult) field0Length() int {
+func (p *BackendServiceCheckStorageFormatResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -7872,7 +16209,7 @@ func (p *BackendServiceGetTabletStatResult) field0Length() int {
return l
}
-func (p *BackendServiceGetTrashUsedCapacityArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceWarmUpCacheAsyncArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -7893,10 +16230,27 @@ func (p *BackendServiceGetTrashUsedCapacityArgs) FastRead(buf []byte) (int, erro
if fieldTypeId == thrift.STOP {
break
}
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldTypeError
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
}
l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
@@ -7916,41 +16270,73 @@ ReadStructBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-
-SkipFieldTypeError:
- return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpCacheAsyncArgs[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
+func (p *BackendServiceWarmUpCacheAsyncArgs) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := NewTWarmUpCacheAsyncRequest()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Request = tmp
+ return offset, nil
+}
+
// for compatibility
-func (p *BackendServiceGetTrashUsedCapacityArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceWarmUpCacheAsyncArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceGetTrashUsedCapacityArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceWarmUpCacheAsyncArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_trash_used_capacity_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "warm_up_cache_async_args")
if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
return offset
}
-func (p *BackendServiceGetTrashUsedCapacityArgs) BLength() int {
+func (p *BackendServiceWarmUpCacheAsyncArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("get_trash_used_capacity_args")
+ l += bthrift.Binary.StructBeginLength("warm_up_cache_async_args")
if p != nil {
+ l += p.field1Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
return l
}
-func (p *BackendServiceGetTrashUsedCapacityResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceWarmUpCacheAsyncArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1)
+ offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceWarmUpCacheAsyncArgs) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1)
+ l += p.Request.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *BackendServiceWarmUpCacheAsyncResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -7973,7 +16359,7 @@ func (p *BackendServiceGetTrashUsedCapacityResult) FastRead(buf []byte) (int, er
}
switch fieldId {
case 0:
- if fieldTypeId == thrift.I64 {
+ if fieldTypeId == thrift.STRUCT {
l, err = p.FastReadField0(buf[offset:])
offset += l
if err != nil {
@@ -8012,7 +16398,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTrashUsedCapacityResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpCacheAsyncResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -8021,27 +16407,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetTrashUsedCapacityResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceWarmUpCacheAsyncResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ tmp := NewTWarmUpCacheAsyncResponse()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
- p.Success = &v
-
}
+ p.Success = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceGetTrashUsedCapacityResult) FastWrite(buf []byte) int {
+func (p *BackendServiceWarmUpCacheAsyncResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceGetTrashUsedCapacityResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceWarmUpCacheAsyncResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_trash_used_capacity_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "warm_up_cache_async_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -8050,9 +16436,9 @@ func (p *BackendServiceGetTrashUsedCapacityResult) FastWriteNocopy(buf []byte, b
return offset
}
-func (p *BackendServiceGetTrashUsedCapacityResult) BLength() int {
+func (p *BackendServiceWarmUpCacheAsyncResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("get_trash_used_capacity_result")
+ l += bthrift.Binary.StructBeginLength("warm_up_cache_async_result")
if p != nil {
l += p.field0Length()
}
@@ -8061,29 +16447,27 @@ func (p *BackendServiceGetTrashUsedCapacityResult) BLength() int {
return l
}
-func (p *BackendServiceGetTrashUsedCapacityResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceWarmUpCacheAsyncResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.I64, 0)
- offset += bthrift.Binary.WriteI64(buf[offset:], *p.Success)
-
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
+ offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
}
return offset
}
-func (p *BackendServiceGetTrashUsedCapacityResult) field0Length() int {
+func (p *BackendServiceWarmUpCacheAsyncResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
- l += bthrift.Binary.FieldBeginLength("success", thrift.I64, 0)
- l += bthrift.Binary.I64Length(*p.Success)
-
+ l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
+ l += p.Success.BLength()
l += bthrift.Binary.FieldEndLength()
}
return l
}
-func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -8104,10 +16488,27 @@ func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastRead(buf []byte) (int,
if fieldTypeId == thrift.STOP {
break
}
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldTypeError
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
}
l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
@@ -8127,41 +16528,73 @@ ReadStructBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-
-SkipFieldTypeError:
- return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckWarmUpCacheAsyncArgs[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := NewTCheckWarmUpCacheAsyncRequest()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.Request = tmp
+ return offset, nil
+}
+
// for compatibility
-func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceGetDiskTrashUsedCapacityArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_disk_trash_used_capacity_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "check_warm_up_cache_async_args")
if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
return offset
}
-func (p *BackendServiceGetDiskTrashUsedCapacityArgs) BLength() int {
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("get_disk_trash_used_capacity_args")
+ l += bthrift.Binary.StructBeginLength("check_warm_up_cache_async_args")
if p != nil {
+ l += p.field1Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
return l
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1)
+ offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ return offset
+}
+
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) field1Length() int {
+ l := 0
+ l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1)
+ l += p.Request.BLength()
+ l += bthrift.Binary.FieldEndLength()
+ return l
+}
+
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -8184,7 +16617,7 @@ func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastRead(buf []byte) (int
}
switch fieldId {
case 0:
- if fieldTypeId == thrift.LIST {
+ if fieldTypeId == thrift.STRUCT {
l, err = p.FastReadField0(buf[offset:])
offset += l
if err != nil {
@@ -8223,7 +16656,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetDiskTrashUsedCapacityResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckWarmUpCacheAsyncResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -8232,41 +16665,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
- offset += l
- if err != nil {
- return offset, err
- }
- p.Success = make([]*TDiskTrashInfo, 0, size)
- for i := 0; i < size; i++ {
- _elem := NewTDiskTrashInfo()
- if l, err := _elem.FastRead(buf[offset:]); err != nil {
- return offset, err
- } else {
- offset += l
- }
-
- p.Success = append(p.Success, _elem)
- }
- if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ tmp := NewTCheckWarmUpCacheAsyncResponse()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
+ p.Success = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastWrite(buf []byte) int {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_disk_trash_used_capacity_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "check_warm_up_cache_async_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -8275,9 +16694,9 @@ func (p *BackendServiceGetDiskTrashUsedCapacityResult) FastWriteNocopy(buf []byt
return offset
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) BLength() int {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("get_disk_trash_used_capacity_result")
+ l += bthrift.Binary.StructBeginLength("check_warm_up_cache_async_result")
if p != nil {
l += p.field0Length()
}
@@ -8286,39 +16705,27 @@ func (p *BackendServiceGetDiskTrashUsedCapacityResult) BLength() int {
return l
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.LIST, 0)
- listBeginOffset := offset
- offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
- var length int
- for _, v := range p.Success {
- length++
- offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
- }
- bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length)
- offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
+ offset += p.Success.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
}
return offset
}
-func (p *BackendServiceGetDiskTrashUsedCapacityResult) field0Length() int {
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
- l += bthrift.Binary.FieldBeginLength("success", thrift.LIST, 0)
- l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Success))
- for _, v := range p.Success {
- l += v.BLength()
- }
- l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
+ l += p.Success.BLength()
l += bthrift.Binary.FieldEndLength()
}
return l
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceSyncLoadForTabletsArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -8341,7 +16748,7 @@ func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastRead(buf []byte) (int, err
}
switch fieldId {
case 1:
- if fieldTypeId == thrift.LIST {
+ if fieldTypeId == thrift.STRUCT {
l, err = p.FastReadField1(buf[offset:])
offset += l
if err != nil {
@@ -8380,7 +16787,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskArgs[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSyncLoadForTabletsArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -8389,41 +16796,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceSyncLoadForTabletsArgs) FastReadField1(buf []byte) (int, error) {
offset := 0
- _, size, l, err := bthrift.Binary.ReadListBegin(buf[offset:])
- offset += l
- if err != nil {
- return offset, err
- }
- p.Tasks = make([]*TRoutineLoadTask, 0, size)
- for i := 0; i < size; i++ {
- _elem := NewTRoutineLoadTask()
- if l, err := _elem.FastRead(buf[offset:]); err != nil {
- return offset, err
- } else {
- offset += l
- }
-
- p.Tasks = append(p.Tasks, _elem)
- }
- if l, err := bthrift.Binary.ReadListEnd(buf[offset:]); err != nil {
+ tmp := NewTSyncLoadForTabletsRequest()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
+ p.Request = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceSyncLoadForTabletsArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceSyncLoadForTabletsArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_routine_load_task_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "sync_load_for_tablets_args")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
@@ -8432,9 +16825,9 @@ func (p *BackendServiceSubmitRoutineLoadTaskArgs) FastWriteNocopy(buf []byte, bi
return offset
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) BLength() int {
+func (p *BackendServiceSyncLoadForTabletsArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("submit_routine_load_task_args")
+ l += bthrift.Binary.StructBeginLength("sync_load_for_tablets_args")
if p != nil {
l += p.field1Length()
}
@@ -8443,35 +16836,23 @@ func (p *BackendServiceSubmitRoutineLoadTaskArgs) BLength() int {
return l
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceSyncLoadForTabletsArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "tasks", thrift.LIST, 1)
- listBeginOffset := offset
- offset += bthrift.Binary.ListBeginLength(thrift.STRUCT, 0)
- var length int
- for _, v := range p.Tasks {
- length++
- offset += v.FastWriteNocopy(buf[offset:], binaryWriter)
- }
- bthrift.Binary.WriteListBegin(buf[listBeginOffset:], thrift.STRUCT, length)
- offset += bthrift.Binary.WriteListEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1)
+ offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServiceSubmitRoutineLoadTaskArgs) field1Length() int {
+func (p *BackendServiceSyncLoadForTabletsArgs) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("tasks", thrift.LIST, 1)
- l += bthrift.Binary.ListBeginLength(thrift.STRUCT, len(p.Tasks))
- for _, v := range p.Tasks {
- l += v.BLength()
- }
- l += bthrift.Binary.ListEndLength()
+ l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1)
+ l += p.Request.BLength()
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceSyncLoadForTabletsResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -8533,7 +16914,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSubmitRoutineLoadTaskResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceSyncLoadForTabletsResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -8542,10 +16923,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceSyncLoadForTabletsResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := status.NewTStatus()
+ tmp := NewTSyncLoadForTabletsResponse()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -8556,13 +16937,13 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) FastReadField0(buf []byte) (
}
// for compatibility
-func (p *BackendServiceSubmitRoutineLoadTaskResult) FastWrite(buf []byte) int {
+func (p *BackendServiceSyncLoadForTabletsResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceSyncLoadForTabletsResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "submit_routine_load_task_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "sync_load_for_tablets_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -8571,9 +16952,9 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) FastWriteNocopy(buf []byte,
return offset
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) BLength() int {
+func (p *BackendServiceSyncLoadForTabletsResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("submit_routine_load_task_result")
+ l += bthrift.Binary.StructBeginLength("sync_load_for_tablets_result")
if p != nil {
l += p.field0Length()
}
@@ -8582,7 +16963,7 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) BLength() int {
return l
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceSyncLoadForTabletsResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -8592,7 +16973,7 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) fastWriteField0(buf []byte,
return offset
}
-func (p *BackendServiceSubmitRoutineLoadTaskResult) field0Length() int {
+func (p *BackendServiceSyncLoadForTabletsResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -8602,7 +16983,7 @@ func (p *BackendServiceSubmitRoutineLoadTaskResult) field0Length() int {
return l
}
-func (p *BackendServiceOpenScannerArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetTopNHotPartitionsArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -8664,7 +17045,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerArgs[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTopNHotPartitionsArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -8673,27 +17054,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceOpenScannerArgs) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceGetTopNHotPartitionsArgs) FastReadField1(buf []byte) (int, error) {
offset := 0
- tmp := dorisexternalservice.NewTScanOpenParams()
+ tmp := NewTGetTopNHotPartitionsRequest()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
- p.Params = tmp
+ p.Request = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceOpenScannerArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceGetTopNHotPartitionsArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceOpenScannerArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetTopNHotPartitionsArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "open_scanner_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_top_n_hot_partitions_args")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
@@ -8702,9 +17083,9 @@ func (p *BackendServiceOpenScannerArgs) FastWriteNocopy(buf []byte, binaryWriter
return offset
}
-func (p *BackendServiceOpenScannerArgs) BLength() int {
+func (p *BackendServiceGetTopNHotPartitionsArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("open_scanner_args")
+ l += bthrift.Binary.StructBeginLength("get_top_n_hot_partitions_args")
if p != nil {
l += p.field1Length()
}
@@ -8713,23 +17094,23 @@ func (p *BackendServiceOpenScannerArgs) BLength() int {
return l
}
-func (p *BackendServiceOpenScannerArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetTopNHotPartitionsArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1)
- offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1)
+ offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServiceOpenScannerArgs) field1Length() int {
+func (p *BackendServiceGetTopNHotPartitionsArgs) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1)
- l += p.Params.BLength()
+ l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1)
+ l += p.Request.BLength()
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServiceOpenScannerResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetTopNHotPartitionsResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -8791,7 +17172,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceOpenScannerResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetTopNHotPartitionsResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -8800,10 +17181,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceOpenScannerResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceGetTopNHotPartitionsResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := dorisexternalservice.NewTScanOpenResult_()
+ tmp := NewTGetTopNHotPartitionsResponse()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -8814,13 +17195,13 @@ func (p *BackendServiceOpenScannerResult) FastReadField0(buf []byte) (int, error
}
// for compatibility
-func (p *BackendServiceOpenScannerResult) FastWrite(buf []byte) int {
+func (p *BackendServiceGetTopNHotPartitionsResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceOpenScannerResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetTopNHotPartitionsResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "open_scanner_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_top_n_hot_partitions_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -8829,9 +17210,9 @@ func (p *BackendServiceOpenScannerResult) FastWriteNocopy(buf []byte, binaryWrit
return offset
}
-func (p *BackendServiceOpenScannerResult) BLength() int {
+func (p *BackendServiceGetTopNHotPartitionsResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("open_scanner_result")
+ l += bthrift.Binary.StructBeginLength("get_top_n_hot_partitions_result")
if p != nil {
l += p.field0Length()
}
@@ -8840,7 +17221,7 @@ func (p *BackendServiceOpenScannerResult) BLength() int {
return l
}
-func (p *BackendServiceOpenScannerResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetTopNHotPartitionsResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -8850,7 +17231,7 @@ func (p *BackendServiceOpenScannerResult) fastWriteField0(buf []byte, binaryWrit
return offset
}
-func (p *BackendServiceOpenScannerResult) field0Length() int {
+func (p *BackendServiceGetTopNHotPartitionsResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -8860,7 +17241,7 @@ func (p *BackendServiceOpenScannerResult) field0Length() int {
return l
}
-func (p *BackendServiceGetNextArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceWarmUpTabletsArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -8922,7 +17303,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextArgs[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpTabletsArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -8931,27 +17312,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetNextArgs) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceWarmUpTabletsArgs) FastReadField1(buf []byte) (int, error) {
offset := 0
- tmp := dorisexternalservice.NewTScanNextBatchParams()
+ tmp := NewTWarmUpTabletsRequest()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
- p.Params = tmp
+ p.Request = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceGetNextArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceWarmUpTabletsArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceGetNextArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceWarmUpTabletsArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_next_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "warm_up_tablets_args")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
@@ -8960,9 +17341,9 @@ func (p *BackendServiceGetNextArgs) FastWriteNocopy(buf []byte, binaryWriter bth
return offset
}
-func (p *BackendServiceGetNextArgs) BLength() int {
+func (p *BackendServiceWarmUpTabletsArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("get_next_args")
+ l += bthrift.Binary.StructBeginLength("warm_up_tablets_args")
if p != nil {
l += p.field1Length()
}
@@ -8971,23 +17352,23 @@ func (p *BackendServiceGetNextArgs) BLength() int {
return l
}
-func (p *BackendServiceGetNextArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceWarmUpTabletsArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1)
- offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1)
+ offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServiceGetNextArgs) field1Length() int {
+func (p *BackendServiceWarmUpTabletsArgs) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1)
- l += p.Params.BLength()
+ l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1)
+ l += p.Request.BLength()
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServiceGetNextResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceWarmUpTabletsResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -9049,7 +17430,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetNextResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceWarmUpTabletsResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -9058,10 +17439,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetNextResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceWarmUpTabletsResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := dorisexternalservice.NewTScanBatchResult_()
+ tmp := NewTWarmUpTabletsResponse()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -9072,13 +17453,13 @@ func (p *BackendServiceGetNextResult) FastReadField0(buf []byte) (int, error) {
}
// for compatibility
-func (p *BackendServiceGetNextResult) FastWrite(buf []byte) int {
+func (p *BackendServiceWarmUpTabletsResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceGetNextResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceWarmUpTabletsResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_next_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "warm_up_tablets_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -9087,9 +17468,9 @@ func (p *BackendServiceGetNextResult) FastWriteNocopy(buf []byte, binaryWriter b
return offset
}
-func (p *BackendServiceGetNextResult) BLength() int {
+func (p *BackendServiceWarmUpTabletsResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("get_next_result")
+ l += bthrift.Binary.StructBeginLength("warm_up_tablets_result")
if p != nil {
l += p.field0Length()
}
@@ -9098,7 +17479,7 @@ func (p *BackendServiceGetNextResult) BLength() int {
return l
}
-func (p *BackendServiceGetNextResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceWarmUpTabletsResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -9108,7 +17489,7 @@ func (p *BackendServiceGetNextResult) fastWriteField0(buf []byte, binaryWriter b
return offset
}
-func (p *BackendServiceGetNextResult) field0Length() int {
+func (p *BackendServiceWarmUpTabletsResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -9118,7 +17499,7 @@ func (p *BackendServiceGetNextResult) field0Length() int {
return l
}
-func (p *BackendServiceCloseScannerArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceIngestBinlogArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -9180,7 +17561,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerArgs[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -9189,27 +17570,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceCloseScannerArgs) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceIngestBinlogArgs) FastReadField1(buf []byte) (int, error) {
offset := 0
- tmp := dorisexternalservice.NewTScanCloseParams()
+ tmp := NewTIngestBinlogRequest()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
- p.Params = tmp
+ p.IngestBinlogRequest = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceCloseScannerArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceIngestBinlogArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceCloseScannerArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceIngestBinlogArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "close_scanner_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "ingest_binlog_args")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
@@ -9218,9 +17599,9 @@ func (p *BackendServiceCloseScannerArgs) FastWriteNocopy(buf []byte, binaryWrite
return offset
}
-func (p *BackendServiceCloseScannerArgs) BLength() int {
+func (p *BackendServiceIngestBinlogArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("close_scanner_args")
+ l += bthrift.Binary.StructBeginLength("ingest_binlog_args")
if p != nil {
l += p.field1Length()
}
@@ -9229,23 +17610,23 @@ func (p *BackendServiceCloseScannerArgs) BLength() int {
return l
}
-func (p *BackendServiceCloseScannerArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceIngestBinlogArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "params", thrift.STRUCT, 1)
- offset += p.Params.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ingest_binlog_request", thrift.STRUCT, 1)
+ offset += p.IngestBinlogRequest.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServiceCloseScannerArgs) field1Length() int {
+func (p *BackendServiceIngestBinlogArgs) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("params", thrift.STRUCT, 1)
- l += p.Params.BLength()
+ l += bthrift.Binary.FieldBeginLength("ingest_binlog_request", thrift.STRUCT, 1)
+ l += p.IngestBinlogRequest.BLength()
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServiceCloseScannerResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceIngestBinlogResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -9307,7 +17688,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCloseScannerResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -9316,10 +17697,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceCloseScannerResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceIngestBinlogResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := dorisexternalservice.NewTScanCloseResult_()
+ tmp := NewTIngestBinlogResult_()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -9330,13 +17711,13 @@ func (p *BackendServiceCloseScannerResult) FastReadField0(buf []byte) (int, erro
}
// for compatibility
-func (p *BackendServiceCloseScannerResult) FastWrite(buf []byte) int {
+func (p *BackendServiceIngestBinlogResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceCloseScannerResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceIngestBinlogResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "close_scanner_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "ingest_binlog_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -9345,9 +17726,9 @@ func (p *BackendServiceCloseScannerResult) FastWriteNocopy(buf []byte, binaryWri
return offset
}
-func (p *BackendServiceCloseScannerResult) BLength() int {
+func (p *BackendServiceIngestBinlogResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("close_scanner_result")
+ l += bthrift.Binary.StructBeginLength("ingest_binlog_result")
if p != nil {
l += p.field0Length()
}
@@ -9356,7 +17737,7 @@ func (p *BackendServiceCloseScannerResult) BLength() int {
return l
}
-func (p *BackendServiceCloseScannerResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceIngestBinlogResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -9366,7 +17747,7 @@ func (p *BackendServiceCloseScannerResult) fastWriteField0(buf []byte, binaryWri
return offset
}
-func (p *BackendServiceCloseScannerResult) field0Length() int {
+func (p *BackendServiceIngestBinlogResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -9376,7 +17757,7 @@ func (p *BackendServiceCloseScannerResult) field0Length() int {
return l
}
-func (p *BackendServiceGetStreamLoadRecordArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceQueryIngestBinlogArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -9399,7 +17780,7 @@ func (p *BackendServiceGetStreamLoadRecordArgs) FastRead(buf []byte) (int, error
}
switch fieldId {
case 1:
- if fieldTypeId == thrift.I64 {
+ if fieldTypeId == thrift.STRUCT {
l, err = p.FastReadField1(buf[offset:])
offset += l
if err != nil {
@@ -9438,7 +17819,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordArgs[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceQueryIngestBinlogArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -9447,28 +17828,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetStreamLoadRecordArgs) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceQueryIngestBinlogArgs) FastReadField1(buf []byte) (int, error) {
offset := 0
- if v, l, err := bthrift.Binary.ReadI64(buf[offset:]); err != nil {
+ tmp := NewTQueryIngestBinlogRequest()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
-
- p.LastStreamRecordTime = v
-
}
+ p.QueryIngestBinlogRequest = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceGetStreamLoadRecordArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceQueryIngestBinlogArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceGetStreamLoadRecordArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceQueryIngestBinlogArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_stream_load_record_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "query_ingest_binlog_args")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
@@ -9477,9 +17857,9 @@ func (p *BackendServiceGetStreamLoadRecordArgs) FastWriteNocopy(buf []byte, bina
return offset
}
-func (p *BackendServiceGetStreamLoadRecordArgs) BLength() int {
+func (p *BackendServiceQueryIngestBinlogArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("get_stream_load_record_args")
+ l += bthrift.Binary.StructBeginLength("query_ingest_binlog_args")
if p != nil {
l += p.field1Length()
}
@@ -9488,25 +17868,23 @@ func (p *BackendServiceGetStreamLoadRecordArgs) BLength() int {
return l
}
-func (p *BackendServiceGetStreamLoadRecordArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceQueryIngestBinlogArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "last_stream_record_time", thrift.I64, 1)
- offset += bthrift.Binary.WriteI64(buf[offset:], p.LastStreamRecordTime)
-
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "query_ingest_binlog_request", thrift.STRUCT, 1)
+ offset += p.QueryIngestBinlogRequest.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServiceGetStreamLoadRecordArgs) field1Length() int {
+func (p *BackendServiceQueryIngestBinlogArgs) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("last_stream_record_time", thrift.I64, 1)
- l += bthrift.Binary.I64Length(p.LastStreamRecordTime)
-
+ l += bthrift.Binary.FieldBeginLength("query_ingest_binlog_request", thrift.STRUCT, 1)
+ l += p.QueryIngestBinlogRequest.BLength()
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServiceGetStreamLoadRecordResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceQueryIngestBinlogResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -9568,7 +17946,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetStreamLoadRecordResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceQueryIngestBinlogResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -9577,10 +17955,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceGetStreamLoadRecordResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceQueryIngestBinlogResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := NewTStreamLoadRecordResult_()
+ tmp := NewTQueryIngestBinlogResult_()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -9591,13 +17969,13 @@ func (p *BackendServiceGetStreamLoadRecordResult) FastReadField0(buf []byte) (in
}
// for compatibility
-func (p *BackendServiceGetStreamLoadRecordResult) FastWrite(buf []byte) int {
+func (p *BackendServiceQueryIngestBinlogResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceGetStreamLoadRecordResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceQueryIngestBinlogResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_stream_load_record_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "query_ingest_binlog_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -9606,9 +17984,9 @@ func (p *BackendServiceGetStreamLoadRecordResult) FastWriteNocopy(buf []byte, bi
return offset
}
-func (p *BackendServiceGetStreamLoadRecordResult) BLength() int {
+func (p *BackendServiceQueryIngestBinlogResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("get_stream_load_record_result")
+ l += bthrift.Binary.StructBeginLength("query_ingest_binlog_result")
if p != nil {
l += p.field0Length()
}
@@ -9617,7 +17995,7 @@ func (p *BackendServiceGetStreamLoadRecordResult) BLength() int {
return l
}
-func (p *BackendServiceGetStreamLoadRecordResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceQueryIngestBinlogResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -9627,7 +18005,7 @@ func (p *BackendServiceGetStreamLoadRecordResult) fastWriteField0(buf []byte, bi
return offset
}
-func (p *BackendServiceGetStreamLoadRecordResult) field0Length() int {
+func (p *BackendServiceQueryIngestBinlogResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -9637,7 +18015,7 @@ func (p *BackendServiceGetStreamLoadRecordResult) field0Length() int {
return l
}
-func (p *BackendServiceCleanTrashArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServicePublishTopicInfoArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -9658,10 +18036,27 @@ func (p *BackendServiceCleanTrashArgs) FastRead(buf []byte) (int, error) {
if fieldTypeId == thrift.STOP {
break
}
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldTypeError
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ l, err = p.FastReadField1(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
+ default:
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
}
l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
@@ -9681,119 +18076,73 @@ ReadStructBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-
-SkipFieldTypeError:
- return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
+ReadFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishTopicInfoArgs[fieldId]), err)
+SkipFieldError:
+ return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
+func (p *BackendServicePublishTopicInfoArgs) FastReadField1(buf []byte) (int, error) {
+ offset := 0
+
+ tmp := NewTPublishTopicRequest()
+ if l, err := tmp.FastRead(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ }
+ p.TopicRequest = tmp
+ return offset, nil
+}
+
// for compatibility
-func (p *BackendServiceCleanTrashArgs) FastWrite(buf []byte) int {
+func (p *BackendServicePublishTopicInfoArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceCleanTrashArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServicePublishTopicInfoArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "clean_trash_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "publish_topic_info_args")
if p != nil {
+ offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
offset += bthrift.Binary.WriteStructEnd(buf[offset:])
return offset
}
-func (p *BackendServiceCleanTrashArgs) BLength() int {
+func (p *BackendServicePublishTopicInfoArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("clean_trash_args")
+ l += bthrift.Binary.StructBeginLength("publish_topic_info_args")
if p != nil {
+ l += p.field1Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
return l
}
-func (p *BackendServiceCheckStorageFormatArgs) FastRead(buf []byte) (int, error) {
- var err error
- var offset int
- var l int
- var fieldTypeId thrift.TType
- var fieldId int16
- _, l, err = bthrift.Binary.ReadStructBegin(buf)
- offset += l
- if err != nil {
- goto ReadStructBeginError
- }
-
- for {
- _, fieldTypeId, fieldId, l, err = bthrift.Binary.ReadFieldBegin(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldBeginError
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
- offset += l
- if err != nil {
- goto SkipFieldTypeError
- }
-
- l, err = bthrift.Binary.ReadFieldEnd(buf[offset:])
- offset += l
- if err != nil {
- goto ReadFieldEndError
- }
- }
- l, err = bthrift.Binary.ReadStructEnd(buf[offset:])
- offset += l
- if err != nil {
- goto ReadStructEndError
- }
-
- return offset, nil
-ReadStructBeginError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
-ReadFieldBeginError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
-
-SkipFieldTypeError:
- return offset, thrift.PrependError(fmt.Sprintf("%T skip field type %d error", p, fieldTypeId), err)
-ReadFieldEndError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
-ReadStructEndError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-}
-
-// for compatibility
-func (p *BackendServiceCheckStorageFormatArgs) FastWrite(buf []byte) int {
- return 0
-}
-
-func (p *BackendServiceCheckStorageFormatArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServicePublishTopicInfoArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "check_storage_format_args")
- if p != nil {
- }
- offset += bthrift.Binary.WriteFieldStop(buf[offset:])
- offset += bthrift.Binary.WriteStructEnd(buf[offset:])
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "topic_request", thrift.STRUCT, 1)
+ offset += p.TopicRequest.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServiceCheckStorageFormatArgs) BLength() int {
+func (p *BackendServicePublishTopicInfoArgs) field1Length() int {
l := 0
- l += bthrift.Binary.StructBeginLength("check_storage_format_args")
- if p != nil {
- }
- l += bthrift.Binary.FieldStopLength()
- l += bthrift.Binary.StructEndLength()
+ l += bthrift.Binary.FieldBeginLength("topic_request", thrift.STRUCT, 1)
+ l += p.TopicRequest.BLength()
+ l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServiceCheckStorageFormatResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServicePublishTopicInfoResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -9855,7 +18204,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceCheckStorageFormatResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServicePublishTopicInfoResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -9864,10 +18213,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceCheckStorageFormatResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServicePublishTopicInfoResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := NewTCheckStorageFormatResult_()
+ tmp := NewTPublishTopicResult_()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -9878,13 +18227,13 @@ func (p *BackendServiceCheckStorageFormatResult) FastReadField0(buf []byte) (int
}
// for compatibility
-func (p *BackendServiceCheckStorageFormatResult) FastWrite(buf []byte) int {
+func (p *BackendServicePublishTopicInfoResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceCheckStorageFormatResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServicePublishTopicInfoResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "check_storage_format_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "publish_topic_info_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -9893,9 +18242,9 @@ func (p *BackendServiceCheckStorageFormatResult) FastWriteNocopy(buf []byte, bin
return offset
}
-func (p *BackendServiceCheckStorageFormatResult) BLength() int {
+func (p *BackendServicePublishTopicInfoResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("check_storage_format_result")
+ l += bthrift.Binary.StructBeginLength("publish_topic_info_result")
if p != nil {
l += p.field0Length()
}
@@ -9904,7 +18253,7 @@ func (p *BackendServiceCheckStorageFormatResult) BLength() int {
return l
}
-func (p *BackendServiceCheckStorageFormatResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServicePublishTopicInfoResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -9914,7 +18263,7 @@ func (p *BackendServiceCheckStorageFormatResult) fastWriteField0(buf []byte, bin
return offset
}
-func (p *BackendServiceCheckStorageFormatResult) field0Length() int {
+func (p *BackendServicePublishTopicInfoResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -9924,7 +18273,7 @@ func (p *BackendServiceCheckStorageFormatResult) field0Length() int {
return l
}
-func (p *BackendServiceIngestBinlogArgs) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetRealtimeExecStatusArgs) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -9986,7 +18335,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogArgs[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetRealtimeExecStatusArgs[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -9995,27 +18344,27 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceIngestBinlogArgs) FastReadField1(buf []byte) (int, error) {
+func (p *BackendServiceGetRealtimeExecStatusArgs) FastReadField1(buf []byte) (int, error) {
offset := 0
- tmp := NewTIngestBinlogRequest()
+ tmp := NewTGetRealtimeExecStatusRequest()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
offset += l
}
- p.IngestBinlogRequest = tmp
+ p.Request = tmp
return offset, nil
}
// for compatibility
-func (p *BackendServiceIngestBinlogArgs) FastWrite(buf []byte) int {
+func (p *BackendServiceGetRealtimeExecStatusArgs) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceIngestBinlogArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetRealtimeExecStatusArgs) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "ingest_binlog_args")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_realtime_exec_status_args")
if p != nil {
offset += p.fastWriteField1(buf[offset:], binaryWriter)
}
@@ -10024,9 +18373,9 @@ func (p *BackendServiceIngestBinlogArgs) FastWriteNocopy(buf []byte, binaryWrite
return offset
}
-func (p *BackendServiceIngestBinlogArgs) BLength() int {
+func (p *BackendServiceGetRealtimeExecStatusArgs) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("ingest_binlog_args")
+ l += bthrift.Binary.StructBeginLength("get_realtime_exec_status_args")
if p != nil {
l += p.field1Length()
}
@@ -10035,23 +18384,23 @@ func (p *BackendServiceIngestBinlogArgs) BLength() int {
return l
}
-func (p *BackendServiceIngestBinlogArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetRealtimeExecStatusArgs) fastWriteField1(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "ingest_binlog_request", thrift.STRUCT, 1)
- offset += p.IngestBinlogRequest.FastWriteNocopy(buf[offset:], binaryWriter)
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "request", thrift.STRUCT, 1)
+ offset += p.Request.FastWriteNocopy(buf[offset:], binaryWriter)
offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
return offset
}
-func (p *BackendServiceIngestBinlogArgs) field1Length() int {
+func (p *BackendServiceGetRealtimeExecStatusArgs) field1Length() int {
l := 0
- l += bthrift.Binary.FieldBeginLength("ingest_binlog_request", thrift.STRUCT, 1)
- l += p.IngestBinlogRequest.BLength()
+ l += bthrift.Binary.FieldBeginLength("request", thrift.STRUCT, 1)
+ l += p.Request.BLength()
l += bthrift.Binary.FieldEndLength()
return l
}
-func (p *BackendServiceIngestBinlogResult) FastRead(buf []byte) (int, error) {
+func (p *BackendServiceGetRealtimeExecStatusResult) FastRead(buf []byte) (int, error) {
var err error
var offset int
var l int
@@ -10113,7 +18462,7 @@ ReadStructBeginError:
ReadFieldBeginError:
return offset, thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
ReadFieldError:
- return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceIngestBinlogResult[fieldId]), err)
+ return offset, thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_BackendServiceGetRealtimeExecStatusResult[fieldId]), err)
SkipFieldError:
return offset, thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
ReadFieldEndError:
@@ -10122,10 +18471,10 @@ ReadStructEndError:
return offset, thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
}
-func (p *BackendServiceIngestBinlogResult) FastReadField0(buf []byte) (int, error) {
+func (p *BackendServiceGetRealtimeExecStatusResult) FastReadField0(buf []byte) (int, error) {
offset := 0
- tmp := NewTIngestBinlogResult_()
+ tmp := NewTGetRealtimeExecStatusResponse()
if l, err := tmp.FastRead(buf[offset:]); err != nil {
return offset, err
} else {
@@ -10136,13 +18485,13 @@ func (p *BackendServiceIngestBinlogResult) FastReadField0(buf []byte) (int, erro
}
// for compatibility
-func (p *BackendServiceIngestBinlogResult) FastWrite(buf []byte) int {
+func (p *BackendServiceGetRealtimeExecStatusResult) FastWrite(buf []byte) int {
return 0
}
-func (p *BackendServiceIngestBinlogResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetRealtimeExecStatusResult) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
- offset += bthrift.Binary.WriteStructBegin(buf[offset:], "ingest_binlog_result")
+ offset += bthrift.Binary.WriteStructBegin(buf[offset:], "get_realtime_exec_status_result")
if p != nil {
offset += p.fastWriteField0(buf[offset:], binaryWriter)
}
@@ -10151,9 +18500,9 @@ func (p *BackendServiceIngestBinlogResult) FastWriteNocopy(buf []byte, binaryWri
return offset
}
-func (p *BackendServiceIngestBinlogResult) BLength() int {
+func (p *BackendServiceGetRealtimeExecStatusResult) BLength() int {
l := 0
- l += bthrift.Binary.StructBeginLength("ingest_binlog_result")
+ l += bthrift.Binary.StructBeginLength("get_realtime_exec_status_result")
if p != nil {
l += p.field0Length()
}
@@ -10162,7 +18511,7 @@ func (p *BackendServiceIngestBinlogResult) BLength() int {
return l
}
-func (p *BackendServiceIngestBinlogResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+func (p *BackendServiceGetRealtimeExecStatusResult) fastWriteField0(buf []byte, binaryWriter bthrift.BinaryWriter) int {
offset := 0
if p.IsSetSuccess() {
offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "success", thrift.STRUCT, 0)
@@ -10172,7 +18521,7 @@ func (p *BackendServiceIngestBinlogResult) fastWriteField0(buf []byte, binaryWri
return offset
}
-func (p *BackendServiceIngestBinlogResult) field0Length() int {
+func (p *BackendServiceGetRealtimeExecStatusResult) field0Length() int {
l := 0
if p.IsSetSuccess() {
l += bthrift.Binary.FieldBeginLength("success", thrift.STRUCT, 0)
@@ -10326,10 +18675,6 @@ func (p *BackendServiceGetStreamLoadRecordResult) GetResult() interface{} {
return p.Success
}
-func (p *BackendServiceCleanTrashArgs) GetFirstArgument() interface{} {
- return nil
-}
-
func (p *BackendServiceCheckStorageFormatArgs) GetFirstArgument() interface{} {
return nil
}
@@ -10338,6 +18683,46 @@ func (p *BackendServiceCheckStorageFormatResult) GetResult() interface{} {
return p.Success
}
+func (p *BackendServiceWarmUpCacheAsyncArgs) GetFirstArgument() interface{} {
+ return p.Request
+}
+
+func (p *BackendServiceWarmUpCacheAsyncResult) GetResult() interface{} {
+ return p.Success
+}
+
+func (p *BackendServiceCheckWarmUpCacheAsyncArgs) GetFirstArgument() interface{} {
+ return p.Request
+}
+
+func (p *BackendServiceCheckWarmUpCacheAsyncResult) GetResult() interface{} {
+ return p.Success
+}
+
+func (p *BackendServiceSyncLoadForTabletsArgs) GetFirstArgument() interface{} {
+ return p.Request
+}
+
+func (p *BackendServiceSyncLoadForTabletsResult) GetResult() interface{} {
+ return p.Success
+}
+
+func (p *BackendServiceGetTopNHotPartitionsArgs) GetFirstArgument() interface{} {
+ return p.Request
+}
+
+func (p *BackendServiceGetTopNHotPartitionsResult) GetResult() interface{} {
+ return p.Success
+}
+
+func (p *BackendServiceWarmUpTabletsArgs) GetFirstArgument() interface{} {
+ return p.Request
+}
+
+func (p *BackendServiceWarmUpTabletsResult) GetResult() interface{} {
+ return p.Success
+}
+
func (p *BackendServiceIngestBinlogArgs) GetFirstArgument() interface{} {
return p.IngestBinlogRequest
}
@@ -10345,3 +18730,27 @@ func (p *BackendServiceIngestBinlogArgs) GetFirstArgument() interface{} {
func (p *BackendServiceIngestBinlogResult) GetResult() interface{} {
return p.Success
}
+
+func (p *BackendServiceQueryIngestBinlogArgs) GetFirstArgument() interface{} {
+ return p.QueryIngestBinlogRequest
+}
+
+func (p *BackendServiceQueryIngestBinlogResult) GetResult() interface{} {
+ return p.Success
+}
+
+func (p *BackendServicePublishTopicInfoArgs) GetFirstArgument() interface{} {
+ return p.TopicRequest
+}
+
+func (p *BackendServicePublishTopicInfoResult) GetResult() interface{} {
+ return p.Success
+}
+
+func (p *BackendServiceGetRealtimeExecStatusArgs) GetFirstArgument() interface{} {
+ return p.Request
+}
+
+func (p *BackendServiceGetRealtimeExecStatusResult) GetResult() interface{} {
+ return p.Success
+}
diff --git a/pkg/rpc/kitex_gen/data/Data.go b/pkg/rpc/kitex_gen/data/Data.go
index 8494d4cc..780e5545 100644
--- a/pkg/rpc/kitex_gen/data/Data.go
+++ b/pkg/rpc/kitex_gen/data/Data.go
@@ -1,4 +1,4 @@
-// Code generated by thriftgo (0.2.7). DO NOT EDIT.
+// Code generated by thriftgo (0.3.13). DO NOT EDIT.
package data
@@ -25,7 +25,6 @@ func NewTRowBatch() *TRowBatch {
}
func (p *TRowBatch) InitDefault() {
- *p = TRowBatch{}
}
func (p *TRowBatch) GetNumRows() (v int32) {
@@ -114,10 +113,8 @@ func (p *TRowBatch) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetNumRows = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.LIST {
@@ -125,67 +122,54 @@ func (p *TRowBatch) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetRowTuples = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.LIST {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.STRING {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.I32 {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.I64 {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -222,21 +206,24 @@ RequiredFieldNotSetError:
}
func (p *TRowBatch) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.NumRows = v
+ _field = v
}
+ p.NumRows = _field
return nil
}
-
func (p *TRowBatch) ReadField2(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.RowTuples = make([]types.TTupleId, 0, size)
+ _field := make([]types.TTupleId, 0, size)
for i := 0; i < size; i++ {
+
var _elem types.TTupleId
if v, err := iprot.ReadI32(); err != nil {
return err
@@ -244,21 +231,22 @@ func (p *TRowBatch) ReadField2(iprot thrift.TProtocol) error {
_elem = v
}
- p.RowTuples = append(p.RowTuples, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.RowTuples = _field
return nil
}
-
func (p *TRowBatch) ReadField3(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.TupleOffsets = make([]int32, 0, size)
+ _field := make([]int32, 0, size)
for i := 0; i < size; i++ {
+
var _elem int32
if v, err := iprot.ReadI32(); err != nil {
return err
@@ -266,47 +254,56 @@ func (p *TRowBatch) ReadField3(iprot thrift.TProtocol) error {
_elem = v
}
- p.TupleOffsets = append(p.TupleOffsets, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.TupleOffsets = _field
return nil
}
-
func (p *TRowBatch) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.TupleData = v
+ _field = v
}
+ p.TupleData = _field
return nil
}
-
func (p *TRowBatch) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.IsCompressed = v
+ _field = v
}
+ p.IsCompressed = _field
return nil
}
-
func (p *TRowBatch) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.BeNumber = v
+ _field = v
}
+ p.BeNumber = _field
return nil
}
-
func (p *TRowBatch) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.PacketSeq = v
+ _field = v
}
+ p.PacketSeq = _field
return nil
}
@@ -344,7 +341,6 @@ func (p *TRowBatch) Write(oprot thrift.TProtocol) (err error) {
fieldId = 7
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -503,6 +499,7 @@ func (p *TRowBatch) String() string {
return ""
}
return fmt.Sprintf("TRowBatch(%+v)", *p)
+
}
func (p *TRowBatch) DeepEqual(ano *TRowBatch) bool {
@@ -603,6 +600,7 @@ type TCell struct {
LongVal *int64 `thrift:"longVal,3,optional" frugal:"3,optional,i64" json:"longVal,omitempty"`
DoubleVal *float64 `thrift:"doubleVal,4,optional" frugal:"4,optional,double" json:"doubleVal,omitempty"`
StringVal *string `thrift:"stringVal,5,optional" frugal:"5,optional,string" json:"stringVal,omitempty"`
+ IsNull *bool `thrift:"isNull,6,optional" frugal:"6,optional,bool" json:"isNull,omitempty"`
}
func NewTCell() *TCell {
@@ -610,7 +608,6 @@ func NewTCell() *TCell {
}
func (p *TCell) InitDefault() {
- *p = TCell{}
}
var TCell_BoolVal_DEFAULT bool
@@ -657,6 +654,15 @@ func (p *TCell) GetStringVal() (v string) {
}
return *p.StringVal
}
+
+var TCell_IsNull_DEFAULT bool
+
+func (p *TCell) GetIsNull() (v bool) {
+ if !p.IsSetIsNull() {
+ return TCell_IsNull_DEFAULT
+ }
+ return *p.IsNull
+}
func (p *TCell) SetBoolVal(val *bool) {
p.BoolVal = val
}
@@ -672,6 +678,9 @@ func (p *TCell) SetDoubleVal(val *float64) {
func (p *TCell) SetStringVal(val *string) {
p.StringVal = val
}
+func (p *TCell) SetIsNull(val *bool) {
+ p.IsNull = val
+}
var fieldIDToName_TCell = map[int16]string{
1: "boolVal",
@@ -679,6 +688,7 @@ var fieldIDToName_TCell = map[int16]string{
3: "longVal",
4: "doubleVal",
5: "stringVal",
+ 6: "isNull",
}
func (p *TCell) IsSetBoolVal() bool {
@@ -701,6 +711,10 @@ func (p *TCell) IsSetStringVal() bool {
return p.StringVal != nil
}
+func (p *TCell) IsSetIsNull() bool {
+ return p.IsNull != nil
+}
+
func (p *TCell) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -725,57 +739,54 @@ func (p *TCell) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.DOUBLE {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.STRING {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 6:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField6(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -801,47 +812,69 @@ ReadStructEndError:
}
func (p *TCell) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.BoolVal = &v
+ _field = &v
}
+ p.BoolVal = _field
return nil
}
-
func (p *TCell) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.IntVal = &v
+ _field = &v
}
+ p.IntVal = _field
return nil
}
-
func (p *TCell) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.LongVal = &v
+ _field = &v
}
+ p.LongVal = _field
return nil
}
-
func (p *TCell) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *float64
if v, err := iprot.ReadDouble(); err != nil {
return err
} else {
- p.DoubleVal = &v
+ _field = &v
}
+ p.DoubleVal = _field
return nil
}
-
func (p *TCell) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.StringVal = &v
+ _field = &v
}
+ p.StringVal = _field
+ return nil
+}
+func (p *TCell) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.IsNull = _field
return nil
}
@@ -871,7 +904,10 @@ func (p *TCell) Write(oprot thrift.TProtocol) (err error) {
fieldId = 5
goto WriteFieldError
}
-
+ if err = p.writeField6(oprot); err != nil {
+ fieldId = 6
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -985,11 +1021,31 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err)
}
+func (p *TCell) writeField6(oprot thrift.TProtocol) (err error) {
+ if p.IsSetIsNull() {
+ if err = oprot.WriteFieldBegin("isNull", thrift.BOOL, 6); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(*p.IsNull); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err)
+}
+
func (p *TCell) String() string {
if p == nil {
return ""
}
return fmt.Sprintf("TCell(%+v)", *p)
+
}
func (p *TCell) DeepEqual(ano *TCell) bool {
@@ -1013,6 +1069,9 @@ func (p *TCell) DeepEqual(ano *TCell) bool {
if !p.Field5DeepEqual(ano.StringVal) {
return false
}
+ if !p.Field6DeepEqual(ano.IsNull) {
+ return false
+ }
return true
}
@@ -1076,6 +1135,18 @@ func (p *TCell) Field5DeepEqual(src *string) bool {
}
return true
}
+func (p *TCell) Field6DeepEqual(src *bool) bool {
+
+ if p.IsNull == src {
+ return true
+ } else if p.IsNull == nil || src == nil {
+ return false
+ }
+ if *p.IsNull != *src {
+ return false
+ }
+ return true
+}
type TResultRow struct {
ColVals []*TCell `thrift:"colVals,1" frugal:"1,default,list" json:"colVals"`
@@ -1086,7 +1157,6 @@ func NewTResultRow() *TResultRow {
}
func (p *TResultRow) InitDefault() {
- *p = TResultRow{}
}
func (p *TResultRow) GetColVals() (v []*TCell) {
@@ -1124,17 +1194,14 @@ func (p *TResultRow) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -1164,18 +1231,22 @@ func (p *TResultRow) ReadField1(iprot thrift.TProtocol) error {
if err != nil {
return err
}
- p.ColVals = make([]*TCell, 0, size)
+ _field := make([]*TCell, 0, size)
+ values := make([]TCell, size)
for i := 0; i < size; i++ {
- _elem := NewTCell()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.ColVals = append(p.ColVals, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.ColVals = _field
return nil
}
@@ -1189,7 +1260,6 @@ func (p *TResultRow) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -1238,6 +1308,7 @@ func (p *TResultRow) String() string {
return ""
}
return fmt.Sprintf("TResultRow(%+v)", *p)
+
}
func (p *TResultRow) DeepEqual(ano *TResultRow) bool {
@@ -1275,7 +1346,6 @@ func NewTRow() *TRow {
}
func (p *TRow) InitDefault() {
- *p = TRow{}
}
var TRow_ColumnValue_DEFAULT []*TCell
@@ -1322,17 +1392,14 @@ func (p *TRow) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -1362,18 +1429,22 @@ func (p *TRow) ReadField1(iprot thrift.TProtocol) error {
if err != nil {
return err
}
- p.ColumnValue = make([]*TCell, 0, size)
+ _field := make([]*TCell, 0, size)
+ values := make([]TCell, size)
for i := 0; i < size; i++ {
- _elem := NewTCell()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.ColumnValue = append(p.ColumnValue, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.ColumnValue = _field
return nil
}
@@ -1387,7 +1458,6 @@ func (p *TRow) Write(oprot thrift.TProtocol) (err error) {
fieldId = 1
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -1438,6 +1508,7 @@ func (p *TRow) String() string {
return ""
}
return fmt.Sprintf("TRow(%+v)", *p)
+
}
func (p *TRow) DeepEqual(ano *TRow) bool {
@@ -1478,7 +1549,6 @@ func NewTResultBatch() *TResultBatch {
}
func (p *TResultBatch) InitDefault() {
- *p = TResultBatch{}
}
func (p *TResultBatch) GetRows() (v [][]byte) {
@@ -1553,10 +1623,8 @@ func (p *TResultBatch) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetRows = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.BOOL {
@@ -1564,10 +1632,8 @@ func (p *TResultBatch) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetIsCompressed = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
@@ -1575,27 +1641,22 @@ func (p *TResultBatch) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetPacketSeq = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.MAP {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -1641,8 +1702,9 @@ func (p *TResultBatch) ReadField1(iprot thrift.TProtocol) error {
if err != nil {
return err
}
- p.Rows = make([][]byte, 0, size)
+ _field := make([][]byte, 0, size)
for i := 0; i < size; i++ {
+
var _elem []byte
if v, err := iprot.ReadBinary(); err != nil {
return err
@@ -1650,38 +1712,42 @@ func (p *TResultBatch) ReadField1(iprot thrift.TProtocol) error {
_elem = []byte(v)
}
- p.Rows = append(p.Rows, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Rows = _field
return nil
}
-
func (p *TResultBatch) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.IsCompressed = v
+ _field = v
}
+ p.IsCompressed = _field
return nil
}
-
func (p *TResultBatch) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.PacketSeq = v
+ _field = v
}
+ p.PacketSeq = _field
return nil
}
-
func (p *TResultBatch) ReadField4(iprot thrift.TProtocol) error {
_, _, size, err := iprot.ReadMapBegin()
if err != nil {
return err
}
- p.AttachedInfos = make(map[string]string, size)
+ _field := make(map[string]string, size)
for i := 0; i < size; i++ {
var _key string
if v, err := iprot.ReadString(); err != nil {
@@ -1697,11 +1763,12 @@ func (p *TResultBatch) ReadField4(iprot thrift.TProtocol) error {
_val = v
}
- p.AttachedInfos[_key] = _val
+ _field[_key] = _val
}
if err := iprot.ReadMapEnd(); err != nil {
return err
}
+ p.AttachedInfos = _field
return nil
}
@@ -1727,7 +1794,6 @@ func (p *TResultBatch) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -1814,11 +1880,9 @@ func (p *TResultBatch) writeField4(oprot thrift.TProtocol) (err error) {
return err
}
for k, v := range p.AttachedInfos {
-
if err := oprot.WriteString(k); err != nil {
return err
}
-
if err := oprot.WriteString(v); err != nil {
return err
}
@@ -1842,6 +1906,7 @@ func (p *TResultBatch) String() string {
return ""
}
return fmt.Sprintf("TResultBatch(%+v)", *p)
+
}
func (p *TResultBatch) DeepEqual(ano *TResultBatch) bool {
diff --git a/pkg/rpc/kitex_gen/data/k-Data.go b/pkg/rpc/kitex_gen/data/k-Data.go
index 6828fb31..fc31f974 100644
--- a/pkg/rpc/kitex_gen/data/k-Data.go
+++ b/pkg/rpc/kitex_gen/data/k-Data.go
@@ -1,4 +1,4 @@
-// Code generated by Kitex v0.4.4. DO NOT EDIT.
+// Code generated by Kitex v0.8.0. DO NOT EDIT.
package data
@@ -11,6 +11,7 @@ import (
"github.com/apache/thrift/lib/go/thrift"
"github.com/cloudwego/kitex/pkg/protocol/bthrift"
+
"github.com/selectdb/ccr_syncer/pkg/rpc/kitex_gen/types"
)
@@ -602,6 +603,20 @@ func (p *TCell) FastRead(buf []byte) (int, error) {
goto SkipFieldError
}
}
+ case 6:
+ if fieldTypeId == thrift.BOOL {
+ l, err = p.FastReadField6(buf[offset:])
+ offset += l
+ if err != nil {
+ goto ReadFieldError
+ }
+ } else {
+ l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
+ offset += l
+ if err != nil {
+ goto SkipFieldError
+ }
+ }
default:
l, err = bthrift.Binary.Skip(buf[offset:], fieldTypeId)
offset += l
@@ -702,6 +717,19 @@ func (p *TCell) FastReadField5(buf []byte) (int, error) {
return offset, nil
}
+func (p *TCell) FastReadField6(buf []byte) (int, error) {
+ offset := 0
+
+ if v, l, err := bthrift.Binary.ReadBool(buf[offset:]); err != nil {
+ return offset, err
+ } else {
+ offset += l
+ p.IsNull = &v
+
+ }
+ return offset, nil
+}
+
// for compatibility
func (p *TCell) FastWrite(buf []byte) int {
return 0
@@ -715,6 +743,7 @@ func (p *TCell) FastWriteNocopy(buf []byte, binaryWriter bthrift.BinaryWriter) i
offset += p.fastWriteField2(buf[offset:], binaryWriter)
offset += p.fastWriteField3(buf[offset:], binaryWriter)
offset += p.fastWriteField4(buf[offset:], binaryWriter)
+ offset += p.fastWriteField6(buf[offset:], binaryWriter)
offset += p.fastWriteField5(buf[offset:], binaryWriter)
}
offset += bthrift.Binary.WriteFieldStop(buf[offset:])
@@ -731,6 +760,7 @@ func (p *TCell) BLength() int {
l += p.field3Length()
l += p.field4Length()
l += p.field5Length()
+ l += p.field6Length()
}
l += bthrift.Binary.FieldStopLength()
l += bthrift.Binary.StructEndLength()
@@ -792,6 +822,17 @@ func (p *TCell) fastWriteField5(buf []byte, binaryWriter bthrift.BinaryWriter) i
return offset
}
+func (p *TCell) fastWriteField6(buf []byte, binaryWriter bthrift.BinaryWriter) int {
+ offset := 0
+ if p.IsSetIsNull() {
+ offset += bthrift.Binary.WriteFieldBegin(buf[offset:], "isNull", thrift.BOOL, 6)
+ offset += bthrift.Binary.WriteBool(buf[offset:], *p.IsNull)
+
+ offset += bthrift.Binary.WriteFieldEnd(buf[offset:])
+ }
+ return offset
+}
+
func (p *TCell) field1Length() int {
l := 0
if p.IsSetBoolVal() {
@@ -847,6 +888,17 @@ func (p *TCell) field5Length() int {
return l
}
+func (p *TCell) field6Length() int {
+ l := 0
+ if p.IsSetIsNull() {
+ l += bthrift.Binary.FieldBeginLength("isNull", thrift.BOOL, 6)
+ l += bthrift.Binary.BoolLength(*p.IsNull)
+
+ l += bthrift.Binary.FieldEndLength()
+ }
+ return l
+}
+
func (p *TResultRow) FastRead(buf []byte) (int, error) {
var err error
var offset int
diff --git a/pkg/rpc/kitex_gen/datasinks/DataSinks.go b/pkg/rpc/kitex_gen/datasinks/DataSinks.go
index ba8753c7..3b8244c7 100644
--- a/pkg/rpc/kitex_gen/datasinks/DataSinks.go
+++ b/pkg/rpc/kitex_gen/datasinks/DataSinks.go
@@ -1,4 +1,4 @@
-// Code generated by thriftgo (0.2.7). DO NOT EDIT.
+// Code generated by thriftgo (0.3.13). DO NOT EDIT.
package datasinks
@@ -30,6 +30,9 @@ const (
TDataSinkType_JDBC_TABLE_SINK TDataSinkType = 9
TDataSinkType_MULTI_CAST_DATA_STREAM_SINK TDataSinkType = 10
TDataSinkType_GROUP_COMMIT_OLAP_TABLE_SINK TDataSinkType = 11
+ TDataSinkType_GROUP_COMMIT_BLOCK_SINK TDataSinkType = 12
+ TDataSinkType_HIVE_TABLE_SINK TDataSinkType = 13
+ TDataSinkType_ICEBERG_TABLE_SINK TDataSinkType = 14
)
func (p TDataSinkType) String() string {
@@ -58,6 +61,12 @@ func (p TDataSinkType) String() string {
return "MULTI_CAST_DATA_STREAM_SINK"
case TDataSinkType_GROUP_COMMIT_OLAP_TABLE_SINK:
return "GROUP_COMMIT_OLAP_TABLE_SINK"
+ case TDataSinkType_GROUP_COMMIT_BLOCK_SINK:
+ return "GROUP_COMMIT_BLOCK_SINK"
+ case TDataSinkType_HIVE_TABLE_SINK:
+ return "HIVE_TABLE_SINK"
+ case TDataSinkType_ICEBERG_TABLE_SINK:
+ return "ICEBERG_TABLE_SINK"
}
return ""
}
@@ -88,6 +97,12 @@ func TDataSinkTypeFromString(s string) (TDataSinkType, error) {
return TDataSinkType_MULTI_CAST_DATA_STREAM_SINK, nil
case "GROUP_COMMIT_OLAP_TABLE_SINK":
return TDataSinkType_GROUP_COMMIT_OLAP_TABLE_SINK, nil
+ case "GROUP_COMMIT_BLOCK_SINK":
+ return TDataSinkType_GROUP_COMMIT_BLOCK_SINK, nil
+ case "HIVE_TABLE_SINK":
+ return TDataSinkType_HIVE_TABLE_SINK, nil
+ case "ICEBERG_TABLE_SINK":
+ return TDataSinkType_ICEBERG_TABLE_SINK, nil
}
return TDataSinkType(0), fmt.Errorf("not a valid TDataSinkType string")
}
@@ -499,6 +514,194 @@ func (p *TParquetRepetitionType) Value() (driver.Value, error) {
return int64(*p), nil
}
+type TGroupCommitMode int64
+
+const (
+ TGroupCommitMode_SYNC_MODE TGroupCommitMode = 0
+ TGroupCommitMode_ASYNC_MODE TGroupCommitMode = 1
+ TGroupCommitMode_OFF_MODE TGroupCommitMode = 2
+)
+
+func (p TGroupCommitMode) String() string {
+ switch p {
+ case TGroupCommitMode_SYNC_MODE:
+ return "SYNC_MODE"
+ case TGroupCommitMode_ASYNC_MODE:
+ return "ASYNC_MODE"
+ case TGroupCommitMode_OFF_MODE:
+ return "OFF_MODE"
+ }
+ return ""
+}
+
+func TGroupCommitModeFromString(s string) (TGroupCommitMode, error) {
+ switch s {
+ case "SYNC_MODE":
+ return TGroupCommitMode_SYNC_MODE, nil
+ case "ASYNC_MODE":
+ return TGroupCommitMode_ASYNC_MODE, nil
+ case "OFF_MODE":
+ return TGroupCommitMode_OFF_MODE, nil
+ }
+ return TGroupCommitMode(0), fmt.Errorf("not a valid TGroupCommitMode string")
+}
+
+func TGroupCommitModePtr(v TGroupCommitMode) *TGroupCommitMode { return &v }
+func (p *TGroupCommitMode) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = TGroupCommitMode(result.Int64)
+ return
+}
+
+func (p *TGroupCommitMode) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
+type THiveColumnType int64
+
+const (
+ THiveColumnType_PARTITION_KEY THiveColumnType = 0
+ THiveColumnType_REGULAR THiveColumnType = 1
+ THiveColumnType_SYNTHESIZED THiveColumnType = 2
+)
+
+func (p THiveColumnType) String() string {
+ switch p {
+ case THiveColumnType_PARTITION_KEY:
+ return "PARTITION_KEY"
+ case THiveColumnType_REGULAR:
+ return "REGULAR"
+ case THiveColumnType_SYNTHESIZED:
+ return "SYNTHESIZED"
+ }
+ return ""
+}
+
+func THiveColumnTypeFromString(s string) (THiveColumnType, error) {
+ switch s {
+ case "PARTITION_KEY":
+ return THiveColumnType_PARTITION_KEY, nil
+ case "REGULAR":
+ return THiveColumnType_REGULAR, nil
+ case "SYNTHESIZED":
+ return THiveColumnType_SYNTHESIZED, nil
+ }
+ return THiveColumnType(0), fmt.Errorf("not a valid THiveColumnType string")
+}
+
+func THiveColumnTypePtr(v THiveColumnType) *THiveColumnType { return &v }
+func (p *THiveColumnType) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = THiveColumnType(result.Int64)
+ return
+}
+
+func (p *THiveColumnType) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
+type TUpdateMode int64
+
+const (
+ TUpdateMode_NEW TUpdateMode = 0
+ TUpdateMode_APPEND TUpdateMode = 1
+ TUpdateMode_OVERWRITE TUpdateMode = 2
+)
+
+func (p TUpdateMode) String() string {
+ switch p {
+ case TUpdateMode_NEW:
+ return "NEW"
+ case TUpdateMode_APPEND:
+ return "APPEND"
+ case TUpdateMode_OVERWRITE:
+ return "OVERWRITE"
+ }
+ return ""
+}
+
+func TUpdateModeFromString(s string) (TUpdateMode, error) {
+ switch s {
+ case "NEW":
+ return TUpdateMode_NEW, nil
+ case "APPEND":
+ return TUpdateMode_APPEND, nil
+ case "OVERWRITE":
+ return TUpdateMode_OVERWRITE, nil
+ }
+ return TUpdateMode(0), fmt.Errorf("not a valid TUpdateMode string")
+}
+
+func TUpdateModePtr(v TUpdateMode) *TUpdateMode { return &v }
+func (p *TUpdateMode) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = TUpdateMode(result.Int64)
+ return
+}
+
+func (p *TUpdateMode) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
+type TFileContent int64
+
+const (
+ TFileContent_DATA TFileContent = 0
+ TFileContent_POSITION_DELETES TFileContent = 1
+ TFileContent_EQUALITY_DELETES TFileContent = 2
+)
+
+func (p TFileContent) String() string {
+ switch p {
+ case TFileContent_DATA:
+ return "DATA"
+ case TFileContent_POSITION_DELETES:
+ return "POSITION_DELETES"
+ case TFileContent_EQUALITY_DELETES:
+ return "EQUALITY_DELETES"
+ }
+ return ""
+}
+
+func TFileContentFromString(s string) (TFileContent, error) {
+ switch s {
+ case "DATA":
+ return TFileContent_DATA, nil
+ case "POSITION_DELETES":
+ return TFileContent_POSITION_DELETES, nil
+ case "EQUALITY_DELETES":
+ return TFileContent_EQUALITY_DELETES, nil
+ }
+ return TFileContent(0), fmt.Errorf("not a valid TFileContent string")
+}
+
+func TFileContentPtr(v TFileContent) *TFileContent { return &v }
+func (p *TFileContent) Scan(value interface{}) (err error) {
+ var result sql.NullInt64
+ err = result.Scan(value)
+ *p = TFileContent(result.Int64)
+ return
+}
+
+func (p *TFileContent) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+ return int64(*p), nil
+}
+
type TParquetSchema struct {
SchemaRepetitionType *TParquetRepetitionType `thrift:"schema_repetition_type,1,optional" frugal:"1,optional,TParquetRepetitionType" json:"schema_repetition_type,omitempty"`
SchemaDataType *TParquetDataType `thrift:"schema_data_type,2,optional" frugal:"2,optional,TParquetDataType" json:"schema_data_type,omitempty"`
@@ -511,7 +714,6 @@ func NewTParquetSchema() *TParquetSchema {
}
func (p *TParquetSchema) InitDefault() {
- *p = TParquetSchema{}
}
var TParquetSchema_SchemaRepetitionType_DEFAULT TParquetRepetitionType
@@ -609,47 +811,38 @@ func (p *TParquetSchema) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRING {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I32 {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -675,41 +868,50 @@ ReadStructEndError:
}
func (p *TParquetSchema) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *TParquetRepetitionType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := TParquetRepetitionType(v)
- p.SchemaRepetitionType = &tmp
+ _field = &tmp
}
+ p.SchemaRepetitionType = _field
return nil
}
-
func (p *TParquetSchema) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *TParquetDataType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := TParquetDataType(v)
- p.SchemaDataType = &tmp
+ _field = &tmp
}
+ p.SchemaDataType = _field
return nil
}
-
func (p *TParquetSchema) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.SchemaColumnName = &v
+ _field = &v
}
+ p.SchemaColumnName = _field
return nil
}
-
func (p *TParquetSchema) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *TParquetDataLogicalType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := TParquetDataLogicalType(v)
- p.SchemaDataLogicalType = &tmp
+ _field = &tmp
}
+ p.SchemaDataLogicalType = _field
return nil
}
@@ -735,7 +937,6 @@ func (p *TParquetSchema) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -835,6 +1036,7 @@ func (p *TParquetSchema) String() string {
return ""
}
return fmt.Sprintf("TParquetSchema(%+v)", *p)
+
}
func (p *TParquetSchema) DeepEqual(ano *TParquetSchema) bool {
@@ -908,23 +1110,26 @@ func (p *TParquetSchema) Field4DeepEqual(src *TParquetDataLogicalType) bool {
}
type TResultFileSinkOptions struct {
- FilePath string `thrift:"file_path,1,required" frugal:"1,required,string" json:"file_path"`
- FileFormat plannodes.TFileFormatType `thrift:"file_format,2,required" frugal:"2,required,TFileFormatType" json:"file_format"`
- ColumnSeparator *string `thrift:"column_separator,3,optional" frugal:"3,optional,string" json:"column_separator,omitempty"`
- LineDelimiter *string `thrift:"line_delimiter,4,optional" frugal:"4,optional,string" json:"line_delimiter,omitempty"`
- MaxFileSizeBytes *int64 `thrift:"max_file_size_bytes,5,optional" frugal:"5,optional,i64" json:"max_file_size_bytes,omitempty"`
- BrokerAddresses []*types.TNetworkAddress `thrift:"broker_addresses,6,optional" frugal:"6,optional,list" json:"broker_addresses,omitempty"`
- BrokerProperties map[string]string `thrift:"broker_properties,7,optional" frugal:"7,optional,map" json:"broker_properties,omitempty"`
- SuccessFileName *string `thrift:"success_file_name,8,optional" frugal:"8,optional,string" json:"success_file_name,omitempty"`
- Schema [][]string `thrift:"schema,9,optional" frugal:"9,optional,list>" json:"schema,omitempty"`
- FileProperties map[string]string `thrift:"file_properties,10,optional" frugal:"10,optional,map" json:"file_properties,omitempty"`
- ParquetSchemas []*TParquetSchema `thrift:"parquet_schemas,11,optional" frugal:"11,optional,list" json:"parquet_schemas,omitempty"`
- ParquetCompressionType *TParquetCompressionType `thrift:"parquet_compression_type,12,optional" frugal:"12,optional,TParquetCompressionType" json:"parquet_compression_type,omitempty"`
- ParquetDisableDictionary *bool `thrift:"parquet_disable_dictionary,13,optional" frugal:"13,optional,bool" json:"parquet_disable_dictionary,omitempty"`
- ParquetVersion *TParquetVersion `thrift:"parquet_version,14,optional" frugal:"14,optional,TParquetVersion" json:"parquet_version,omitempty"`
- OrcSchema *string `thrift:"orc_schema,15,optional" frugal:"15,optional,string" json:"orc_schema,omitempty"`
- DeleteExistingFiles *bool `thrift:"delete_existing_files,16,optional" frugal:"16,optional,bool" json:"delete_existing_files,omitempty"`
- FileSuffix *string `thrift:"file_suffix,17,optional" frugal:"17,optional,string" json:"file_suffix,omitempty"`
+ FilePath string `thrift:"file_path,1,required" frugal:"1,required,string" json:"file_path"`
+ FileFormat plannodes.TFileFormatType `thrift:"file_format,2,required" frugal:"2,required,TFileFormatType" json:"file_format"`
+ ColumnSeparator *string `thrift:"column_separator,3,optional" frugal:"3,optional,string" json:"column_separator,omitempty"`
+ LineDelimiter *string `thrift:"line_delimiter,4,optional" frugal:"4,optional,string" json:"line_delimiter,omitempty"`
+ MaxFileSizeBytes *int64 `thrift:"max_file_size_bytes,5,optional" frugal:"5,optional,i64" json:"max_file_size_bytes,omitempty"`
+ BrokerAddresses []*types.TNetworkAddress `thrift:"broker_addresses,6,optional" frugal:"6,optional,list" json:"broker_addresses,omitempty"`
+ BrokerProperties map[string]string `thrift:"broker_properties,7,optional" frugal:"7,optional,map" json:"broker_properties,omitempty"`
+ SuccessFileName *string `thrift:"success_file_name,8,optional" frugal:"8,optional,string" json:"success_file_name,omitempty"`
+ Schema [][]string `thrift:"schema,9,optional" frugal:"9,optional,list>" json:"schema,omitempty"`
+ FileProperties map[string]string `thrift:"file_properties,10,optional" frugal:"10,optional,map" json:"file_properties,omitempty"`
+ ParquetSchemas []*TParquetSchema `thrift:"parquet_schemas,11,optional" frugal:"11,optional,list" json:"parquet_schemas,omitempty"`
+ ParquetCompressionType *TParquetCompressionType `thrift:"parquet_compression_type,12,optional" frugal:"12,optional,TParquetCompressionType" json:"parquet_compression_type,omitempty"`
+ ParquetDisableDictionary *bool `thrift:"parquet_disable_dictionary,13,optional" frugal:"13,optional,bool" json:"parquet_disable_dictionary,omitempty"`
+ ParquetVersion *TParquetVersion `thrift:"parquet_version,14,optional" frugal:"14,optional,TParquetVersion" json:"parquet_version,omitempty"`
+ OrcSchema *string `thrift:"orc_schema,15,optional" frugal:"15,optional,string" json:"orc_schema,omitempty"`
+ DeleteExistingFiles *bool `thrift:"delete_existing_files,16,optional" frugal:"16,optional,bool" json:"delete_existing_files,omitempty"`
+ FileSuffix *string `thrift:"file_suffix,17,optional" frugal:"17,optional,string" json:"file_suffix,omitempty"`
+ WithBom *bool `thrift:"with_bom,18,optional" frugal:"18,optional,bool" json:"with_bom,omitempty"`
+ OrcCompressionType *plannodes.TFileCompressType `thrift:"orc_compression_type,19,optional" frugal:"19,optional,TFileCompressType" json:"orc_compression_type,omitempty"`
+ OrcWriterVersion *int64 `thrift:"orc_writer_version,20,optional" frugal:"20,optional,i64" json:"orc_writer_version,omitempty"`
}
func NewTResultFileSinkOptions() *TResultFileSinkOptions {
@@ -932,7 +1137,6 @@ func NewTResultFileSinkOptions() *TResultFileSinkOptions {
}
func (p *TResultFileSinkOptions) InitDefault() {
- *p = TResultFileSinkOptions{}
}
func (p *TResultFileSinkOptions) GetFilePath() (v string) {
@@ -1077,6 +1281,33 @@ func (p *TResultFileSinkOptions) GetFileSuffix() (v string) {
}
return *p.FileSuffix
}
+
+var TResultFileSinkOptions_WithBom_DEFAULT bool
+
+func (p *TResultFileSinkOptions) GetWithBom() (v bool) {
+ if !p.IsSetWithBom() {
+ return TResultFileSinkOptions_WithBom_DEFAULT
+ }
+ return *p.WithBom
+}
+
+var TResultFileSinkOptions_OrcCompressionType_DEFAULT plannodes.TFileCompressType
+
+func (p *TResultFileSinkOptions) GetOrcCompressionType() (v plannodes.TFileCompressType) {
+ if !p.IsSetOrcCompressionType() {
+ return TResultFileSinkOptions_OrcCompressionType_DEFAULT
+ }
+ return *p.OrcCompressionType
+}
+
+var TResultFileSinkOptions_OrcWriterVersion_DEFAULT int64
+
+func (p *TResultFileSinkOptions) GetOrcWriterVersion() (v int64) {
+ if !p.IsSetOrcWriterVersion() {
+ return TResultFileSinkOptions_OrcWriterVersion_DEFAULT
+ }
+ return *p.OrcWriterVersion
+}
func (p *TResultFileSinkOptions) SetFilePath(val string) {
p.FilePath = val
}
@@ -1128,6 +1359,15 @@ func (p *TResultFileSinkOptions) SetDeleteExistingFiles(val *bool) {
func (p *TResultFileSinkOptions) SetFileSuffix(val *string) {
p.FileSuffix = val
}
+func (p *TResultFileSinkOptions) SetWithBom(val *bool) {
+ p.WithBom = val
+}
+func (p *TResultFileSinkOptions) SetOrcCompressionType(val *plannodes.TFileCompressType) {
+ p.OrcCompressionType = val
+}
+func (p *TResultFileSinkOptions) SetOrcWriterVersion(val *int64) {
+ p.OrcWriterVersion = val
+}
var fieldIDToName_TResultFileSinkOptions = map[int16]string{
1: "file_path",
@@ -1147,6 +1387,9 @@ var fieldIDToName_TResultFileSinkOptions = map[int16]string{
15: "orc_schema",
16: "delete_existing_files",
17: "file_suffix",
+ 18: "with_bom",
+ 19: "orc_compression_type",
+ 20: "orc_writer_version",
}
func (p *TResultFileSinkOptions) IsSetColumnSeparator() bool {
@@ -1209,6 +1452,18 @@ func (p *TResultFileSinkOptions) IsSetFileSuffix() bool {
return p.FileSuffix != nil
}
+func (p *TResultFileSinkOptions) IsSetWithBom() bool {
+ return p.WithBom != nil
+}
+
+func (p *TResultFileSinkOptions) IsSetOrcCompressionType() bool {
+ return p.OrcCompressionType != nil
+}
+
+func (p *TResultFileSinkOptions) IsSetOrcWriterVersion() bool {
+ return p.OrcWriterVersion != nil
+}
+
func (p *TResultFileSinkOptions) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -1236,10 +1491,8 @@ func (p *TResultFileSinkOptions) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetFilePath = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -1247,167 +1500,158 @@ func (p *TResultFileSinkOptions) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetFileFormat = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRING {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.STRING {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I64 {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.LIST {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.MAP {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.STRING {
if err = p.ReadField8(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.LIST {
if err = p.ReadField9(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.MAP {
if err = p.ReadField10(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 11:
if fieldTypeId == thrift.LIST {
if err = p.ReadField11(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 12:
if fieldTypeId == thrift.I32 {
if err = p.ReadField12(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 13:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField13(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 14:
if fieldTypeId == thrift.I32 {
if err = p.ReadField14(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 15:
if fieldTypeId == thrift.STRING {
if err = p.ReadField15(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 16:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField16(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 17:
if fieldTypeId == thrift.STRING {
if err = p.ReadField17(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 18:
+ if fieldTypeId == thrift.BOOL {
+ if err = p.ReadField18(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 19:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField19(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 20:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField20(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -1444,76 +1688,89 @@ RequiredFieldNotSetError:
}
func (p *TResultFileSinkOptions) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.FilePath = v
+ _field = v
}
+ p.FilePath = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field plannodes.TFileFormatType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.FileFormat = plannodes.TFileFormatType(v)
+ _field = plannodes.TFileFormatType(v)
}
+ p.FileFormat = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.ColumnSeparator = &v
+ _field = &v
}
+ p.ColumnSeparator = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.LineDelimiter = &v
+ _field = &v
}
+ p.LineDelimiter = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.MaxFileSizeBytes = &v
+ _field = &v
}
+ p.MaxFileSizeBytes = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField6(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.BrokerAddresses = make([]*types.TNetworkAddress, 0, size)
+ _field := make([]*types.TNetworkAddress, 0, size)
+ values := make([]types.TNetworkAddress, size)
for i := 0; i < size; i++ {
- _elem := types.NewTNetworkAddress()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.BrokerAddresses = append(p.BrokerAddresses, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.BrokerAddresses = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField7(iprot thrift.TProtocol) error {
_, _, size, err := iprot.ReadMapBegin()
if err != nil {
return err
}
- p.BrokerProperties = make(map[string]string, size)
+ _field := make(map[string]string, size)
for i := 0; i < size; i++ {
var _key string
if v, err := iprot.ReadString(); err != nil {
@@ -1529,29 +1786,31 @@ func (p *TResultFileSinkOptions) ReadField7(iprot thrift.TProtocol) error {
_val = v
}
- p.BrokerProperties[_key] = _val
+ _field[_key] = _val
}
if err := iprot.ReadMapEnd(); err != nil {
return err
}
+ p.BrokerProperties = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField8(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.SuccessFileName = &v
+ _field = &v
}
+ p.SuccessFileName = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField9(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.Schema = make([][]string, 0, size)
+ _field := make([][]string, 0, size)
for i := 0; i < size; i++ {
_, size, err := iprot.ReadListBegin()
if err != nil {
@@ -1559,6 +1818,7 @@ func (p *TResultFileSinkOptions) ReadField9(iprot thrift.TProtocol) error {
}
_elem := make([]string, 0, size)
for i := 0; i < size; i++ {
+
var _elem1 string
if v, err := iprot.ReadString(); err != nil {
return err
@@ -1572,20 +1832,20 @@ func (p *TResultFileSinkOptions) ReadField9(iprot thrift.TProtocol) error {
return err
}
- p.Schema = append(p.Schema, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Schema = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField10(iprot thrift.TProtocol) error {
_, _, size, err := iprot.ReadMapBegin()
if err != nil {
return err
}
- p.FileProperties = make(map[string]string, size)
+ _field := make(map[string]string, size)
for i := 0; i < size; i++ {
var _key string
if v, err := iprot.ReadString(); err != nil {
@@ -1601,103 +1861,153 @@ func (p *TResultFileSinkOptions) ReadField10(iprot thrift.TProtocol) error {
_val = v
}
- p.FileProperties[_key] = _val
+ _field[_key] = _val
}
if err := iprot.ReadMapEnd(); err != nil {
return err
}
+ p.FileProperties = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField11(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.ParquetSchemas = make([]*TParquetSchema, 0, size)
+ _field := make([]*TParquetSchema, 0, size)
+ values := make([]TParquetSchema, size)
for i := 0; i < size; i++ {
- _elem := NewTParquetSchema()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.ParquetSchemas = append(p.ParquetSchemas, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.ParquetSchemas = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField12(iprot thrift.TProtocol) error {
+
+ var _field *TParquetCompressionType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := TParquetCompressionType(v)
- p.ParquetCompressionType = &tmp
+ _field = &tmp
}
+ p.ParquetCompressionType = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField13(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.ParquetDisableDictionary = &v
+ _field = &v
}
+ p.ParquetDisableDictionary = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField14(iprot thrift.TProtocol) error {
+
+ var _field *TParquetVersion
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := TParquetVersion(v)
- p.ParquetVersion = &tmp
+ _field = &tmp
}
+ p.ParquetVersion = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField15(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.OrcSchema = &v
+ _field = &v
}
+ p.OrcSchema = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField16(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.DeleteExistingFiles = &v
+ _field = &v
}
+ p.DeleteExistingFiles = _field
return nil
}
-
func (p *TResultFileSinkOptions) ReadField17(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.FileSuffix = &v
+ _field = &v
}
+ p.FileSuffix = _field
return nil
}
+func (p *TResultFileSinkOptions) ReadField18(iprot thrift.TProtocol) error {
-func (p *TResultFileSinkOptions) Write(oprot thrift.TProtocol) (err error) {
- var fieldId int16
- if err = oprot.WriteStructBegin("TResultFileSinkOptions"); err != nil {
- goto WriteStructBeginError
+ var _field *bool
+ if v, err := iprot.ReadBool(); err != nil {
+ return err
+ } else {
+ _field = &v
}
- if p != nil {
- if err = p.writeField1(oprot); err != nil {
- fieldId = 1
- goto WriteFieldError
- }
- if err = p.writeField2(oprot); err != nil {
- fieldId = 2
- goto WriteFieldError
+ p.WithBom = _field
+ return nil
+}
+func (p *TResultFileSinkOptions) ReadField19(iprot thrift.TProtocol) error {
+
+ var _field *plannodes.TFileCompressType
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ tmp := plannodes.TFileCompressType(v)
+ _field = &tmp
+ }
+ p.OrcCompressionType = _field
+ return nil
+}
+func (p *TResultFileSinkOptions) ReadField20(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.OrcWriterVersion = _field
+ return nil
+}
+
+func (p *TResultFileSinkOptions) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TResultFileSinkOptions"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
}
if err = p.writeField3(oprot); err != nil {
fieldId = 3
@@ -1759,7 +2069,18 @@ func (p *TResultFileSinkOptions) Write(oprot thrift.TProtocol) (err error) {
fieldId = 17
goto WriteFieldError
}
-
+ if err = p.writeField18(oprot); err != nil {
+ fieldId = 18
+ goto WriteFieldError
+ }
+ if err = p.writeField19(oprot); err != nil {
+ fieldId = 19
+ goto WriteFieldError
+ }
+ if err = p.writeField20(oprot); err != nil {
+ fieldId = 20
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -1905,11 +2226,9 @@ func (p *TResultFileSinkOptions) writeField7(oprot thrift.TProtocol) (err error)
return err
}
for k, v := range p.BrokerProperties {
-
if err := oprot.WriteString(k); err != nil {
return err
}
-
if err := oprot.WriteString(v); err != nil {
return err
}
@@ -1991,11 +2310,9 @@ func (p *TResultFileSinkOptions) writeField10(oprot thrift.TProtocol) (err error
return err
}
for k, v := range p.FileProperties {
-
if err := oprot.WriteString(k); err != nil {
return err
}
-
if err := oprot.WriteString(v); err != nil {
return err
}
@@ -2155,11 +2472,69 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 17 end error: ", p), err)
}
+func (p *TResultFileSinkOptions) writeField18(oprot thrift.TProtocol) (err error) {
+ if p.IsSetWithBom() {
+ if err = oprot.WriteFieldBegin("with_bom", thrift.BOOL, 18); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteBool(*p.WithBom); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 18 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 18 end error: ", p), err)
+}
+
+func (p *TResultFileSinkOptions) writeField19(oprot thrift.TProtocol) (err error) {
+ if p.IsSetOrcCompressionType() {
+ if err = oprot.WriteFieldBegin("orc_compression_type", thrift.I32, 19); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(*p.OrcCompressionType)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 19 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 19 end error: ", p), err)
+}
+
+func (p *TResultFileSinkOptions) writeField20(oprot thrift.TProtocol) (err error) {
+ if p.IsSetOrcWriterVersion() {
+ if err = oprot.WriteFieldBegin("orc_writer_version", thrift.I64, 20); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.OrcWriterVersion); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 20 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err)
+}
+
func (p *TResultFileSinkOptions) String() string {
if p == nil {
return ""
}
return fmt.Sprintf("TResultFileSinkOptions(%+v)", *p)
+
}
func (p *TResultFileSinkOptions) DeepEqual(ano *TResultFileSinkOptions) bool {
@@ -2219,6 +2594,15 @@ func (p *TResultFileSinkOptions) DeepEqual(ano *TResultFileSinkOptions) bool {
if !p.Field17DeepEqual(ano.FileSuffix) {
return false
}
+ if !p.Field18DeepEqual(ano.WithBom) {
+ return false
+ }
+ if !p.Field19DeepEqual(ano.OrcCompressionType) {
+ return false
+ }
+ if !p.Field20DeepEqual(ano.OrcWriterVersion) {
+ return false
+ }
return true
}
@@ -2427,6 +2811,42 @@ func (p *TResultFileSinkOptions) Field17DeepEqual(src *string) bool {
}
return true
}
+func (p *TResultFileSinkOptions) Field18DeepEqual(src *bool) bool {
+
+ if p.WithBom == src {
+ return true
+ } else if p.WithBom == nil || src == nil {
+ return false
+ }
+ if *p.WithBom != *src {
+ return false
+ }
+ return true
+}
+func (p *TResultFileSinkOptions) Field19DeepEqual(src *plannodes.TFileCompressType) bool {
+
+ if p.OrcCompressionType == src {
+ return true
+ } else if p.OrcCompressionType == nil || src == nil {
+ return false
+ }
+ if *p.OrcCompressionType != *src {
+ return false
+ }
+ return true
+}
+func (p *TResultFileSinkOptions) Field20DeepEqual(src *int64) bool {
+
+ if p.OrcWriterVersion == src {
+ return true
+ } else if p.OrcWriterVersion == nil || src == nil {
+ return false
+ }
+ if *p.OrcWriterVersion != *src {
+ return false
+ }
+ return true
+}
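// Hypothetical usage sketch (not taken from the patch itself): populating the
// three optional fields added to TResultFileSinkOptions in this hunk
// (with_bom, orc_compression_type, orc_writer_version). Only the constructor,
// the exported fields, and the IsSetXxx predicates that appear in this
// generated file are relied on; the example function name is made up.
func exampleResultFileSinkOptions() *TResultFileSinkOptions {
	opts := NewTResultFileSinkOptions()
	withBom := true
	opts.WithBom = &withBom // field 18 (with_bom)
	writerVersion := int64(1)
	opts.OrcWriterVersion = &writerVersion // field 20 (orc_writer_version)
	// Field 19 (OrcCompressionType, *plannodes.TFileCompressType) would be set
	// the same way, with a value taken from the plannodes package.
	if !opts.IsSetWithBom() || !opts.IsSetOrcWriterVersion() {
		panic("optional fields were expected to be set")
	}
	return opts
}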
type TMemoryScratchSink struct {
}
@@ -2436,7 +2856,6 @@ func NewTMemoryScratchSink() *TMemoryScratchSink {
}
func (p *TMemoryScratchSink) InitDefault() {
- *p = TMemoryScratchSink{}
}
var fieldIDToName_TMemoryScratchSink = map[int16]string{}
@@ -2461,7 +2880,6 @@ func (p *TMemoryScratchSink) Read(iprot thrift.TProtocol) (err error) {
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldTypeError
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -2489,7 +2907,6 @@ func (p *TMemoryScratchSink) Write(oprot thrift.TProtocol) (err error) {
goto WriteStructBeginError
}
if p != nil {
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -2511,6 +2928,7 @@ func (p *TMemoryScratchSink) String() string {
return ""
}
return fmt.Sprintf("TMemoryScratchSink(%+v)", *p)
+
}
func (p *TMemoryScratchSink) DeepEqual(ano *TMemoryScratchSink) bool {
@@ -2533,7 +2951,6 @@ func NewTPlanFragmentDestination() *TPlanFragmentDestination {
}
func (p *TPlanFragmentDestination) InitDefault() {
- *p = TPlanFragmentDestination{}
}
var TPlanFragmentDestination_FragmentInstanceId_DEFAULT *types.TUniqueId
@@ -2617,10 +3034,8 @@ func (p *TPlanFragmentDestination) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetFragmentInstanceId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRUCT {
@@ -2628,27 +3043,22 @@ func (p *TPlanFragmentDestination) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetServer = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -2685,26 +3095,27 @@ RequiredFieldNotSetError:
}
func (p *TPlanFragmentDestination) ReadField1(iprot thrift.TProtocol) error {
- p.FragmentInstanceId = types.NewTUniqueId()
- if err := p.FragmentInstanceId.Read(iprot); err != nil {
+ _field := types.NewTUniqueId()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.FragmentInstanceId = _field
return nil
}
-
func (p *TPlanFragmentDestination) ReadField2(iprot thrift.TProtocol) error {
- p.Server = types.NewTNetworkAddress()
- if err := p.Server.Read(iprot); err != nil {
+ _field := types.NewTNetworkAddress()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Server = _field
return nil
}
-
func (p *TPlanFragmentDestination) ReadField3(iprot thrift.TProtocol) error {
- p.BrpcServer = types.NewTNetworkAddress()
- if err := p.BrpcServer.Read(iprot); err != nil {
+ _field := types.NewTNetworkAddress()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.BrpcServer = _field
return nil
}
@@ -2726,7 +3137,6 @@ func (p *TPlanFragmentDestination) Write(oprot thrift.TProtocol) (err error) {
fieldId = 3
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -2803,6 +3213,7 @@ func (p *TPlanFragmentDestination) String() string {
return ""
}
return fmt.Sprintf("TPlanFragmentDestination(%+v)", *p)
+
}
func (p *TPlanFragmentDestination) DeepEqual(ano *TPlanFragmentDestination) bool {
@@ -2846,13 +3257,19 @@ func (p *TPlanFragmentDestination) Field3DeepEqual(src *types.TNetworkAddress) b
}
type TDataStreamSink struct {
- DestNodeId types.TPlanNodeId `thrift:"dest_node_id,1,required" frugal:"1,required,i32" json:"dest_node_id"`
- OutputPartition *partitions.TDataPartition `thrift:"output_partition,2,required" frugal:"2,required,partitions.TDataPartition" json:"output_partition"`
- IgnoreNotFound *bool `thrift:"ignore_not_found,3,optional" frugal:"3,optional,bool" json:"ignore_not_found,omitempty"`
- OutputExprs []*exprs.TExpr `thrift:"output_exprs,4,optional" frugal:"4,optional,list" json:"output_exprs,omitempty"`
- OutputTupleId *types.TTupleId `thrift:"output_tuple_id,5,optional" frugal:"5,optional,i32" json:"output_tuple_id,omitempty"`
- Conjuncts []*exprs.TExpr `thrift:"conjuncts,6,optional" frugal:"6,optional,list" json:"conjuncts,omitempty"`
- RuntimeFilters []*plannodes.TRuntimeFilterDesc `thrift:"runtime_filters,7,optional" frugal:"7,optional,list" json:"runtime_filters,omitempty"`
+ DestNodeId types.TPlanNodeId `thrift:"dest_node_id,1,required" frugal:"1,required,i32" json:"dest_node_id"`
+ OutputPartition *partitions.TDataPartition `thrift:"output_partition,2,required" frugal:"2,required,partitions.TDataPartition" json:"output_partition"`
+ IgnoreNotFound *bool `thrift:"ignore_not_found,3,optional" frugal:"3,optional,bool" json:"ignore_not_found,omitempty"`
+ OutputExprs []*exprs.TExpr `thrift:"output_exprs,4,optional" frugal:"4,optional,list" json:"output_exprs,omitempty"`
+ OutputTupleId *types.TTupleId `thrift:"output_tuple_id,5,optional" frugal:"5,optional,i32" json:"output_tuple_id,omitempty"`
+ Conjuncts []*exprs.TExpr `thrift:"conjuncts,6,optional" frugal:"6,optional,list" json:"conjuncts,omitempty"`
+ RuntimeFilters []*plannodes.TRuntimeFilterDesc `thrift:"runtime_filters,7,optional" frugal:"7,optional,list" json:"runtime_filters,omitempty"`
+ TabletSinkSchema *descriptors.TOlapTableSchemaParam `thrift:"tablet_sink_schema,8,optional" frugal:"8,optional,descriptors.TOlapTableSchemaParam" json:"tablet_sink_schema,omitempty"`
+ TabletSinkPartition *descriptors.TOlapTablePartitionParam `thrift:"tablet_sink_partition,9,optional" frugal:"9,optional,descriptors.TOlapTablePartitionParam" json:"tablet_sink_partition,omitempty"`
+ TabletSinkLocation *descriptors.TOlapTableLocationParam `thrift:"tablet_sink_location,10,optional" frugal:"10,optional,descriptors.TOlapTableLocationParam" json:"tablet_sink_location,omitempty"`
+ TabletSinkTxnId *int64 `thrift:"tablet_sink_txn_id,11,optional" frugal:"11,optional,i64" json:"tablet_sink_txn_id,omitempty"`
+ TabletSinkTupleId *types.TTupleId `thrift:"tablet_sink_tuple_id,12,optional" frugal:"12,optional,i32" json:"tablet_sink_tuple_id,omitempty"`
+ TabletSinkExprs []*exprs.TExpr `thrift:"tablet_sink_exprs,13,optional" frugal:"13,optional,list" json:"tablet_sink_exprs,omitempty"`
}
func NewTDataStreamSink() *TDataStreamSink {
@@ -2860,7 +3277,6 @@ func NewTDataStreamSink() *TDataStreamSink {
}
func (p *TDataStreamSink) InitDefault() {
- *p = TDataStreamSink{}
}
func (p *TDataStreamSink) GetDestNodeId() (v types.TPlanNodeId) {
@@ -2920,6 +3336,60 @@ func (p *TDataStreamSink) GetRuntimeFilters() (v []*plannodes.TRuntimeFilterDesc
}
return p.RuntimeFilters
}
+
+var TDataStreamSink_TabletSinkSchema_DEFAULT *descriptors.TOlapTableSchemaParam
+
+func (p *TDataStreamSink) GetTabletSinkSchema() (v *descriptors.TOlapTableSchemaParam) {
+ if !p.IsSetTabletSinkSchema() {
+ return TDataStreamSink_TabletSinkSchema_DEFAULT
+ }
+ return p.TabletSinkSchema
+}
+
+var TDataStreamSink_TabletSinkPartition_DEFAULT *descriptors.TOlapTablePartitionParam
+
+func (p *TDataStreamSink) GetTabletSinkPartition() (v *descriptors.TOlapTablePartitionParam) {
+ if !p.IsSetTabletSinkPartition() {
+ return TDataStreamSink_TabletSinkPartition_DEFAULT
+ }
+ return p.TabletSinkPartition
+}
+
+var TDataStreamSink_TabletSinkLocation_DEFAULT *descriptors.TOlapTableLocationParam
+
+func (p *TDataStreamSink) GetTabletSinkLocation() (v *descriptors.TOlapTableLocationParam) {
+ if !p.IsSetTabletSinkLocation() {
+ return TDataStreamSink_TabletSinkLocation_DEFAULT
+ }
+ return p.TabletSinkLocation
+}
+
+var TDataStreamSink_TabletSinkTxnId_DEFAULT int64
+
+func (p *TDataStreamSink) GetTabletSinkTxnId() (v int64) {
+ if !p.IsSetTabletSinkTxnId() {
+ return TDataStreamSink_TabletSinkTxnId_DEFAULT
+ }
+ return *p.TabletSinkTxnId
+}
+
+var TDataStreamSink_TabletSinkTupleId_DEFAULT types.TTupleId
+
+func (p *TDataStreamSink) GetTabletSinkTupleId() (v types.TTupleId) {
+ if !p.IsSetTabletSinkTupleId() {
+ return TDataStreamSink_TabletSinkTupleId_DEFAULT
+ }
+ return *p.TabletSinkTupleId
+}
+
+var TDataStreamSink_TabletSinkExprs_DEFAULT []*exprs.TExpr
+
+func (p *TDataStreamSink) GetTabletSinkExprs() (v []*exprs.TExpr) {
+ if !p.IsSetTabletSinkExprs() {
+ return TDataStreamSink_TabletSinkExprs_DEFAULT
+ }
+ return p.TabletSinkExprs
+}
func (p *TDataStreamSink) SetDestNodeId(val types.TPlanNodeId) {
p.DestNodeId = val
}
@@ -2941,15 +3411,39 @@ func (p *TDataStreamSink) SetConjuncts(val []*exprs.TExpr) {
func (p *TDataStreamSink) SetRuntimeFilters(val []*plannodes.TRuntimeFilterDesc) {
p.RuntimeFilters = val
}
+func (p *TDataStreamSink) SetTabletSinkSchema(val *descriptors.TOlapTableSchemaParam) {
+ p.TabletSinkSchema = val
+}
+func (p *TDataStreamSink) SetTabletSinkPartition(val *descriptors.TOlapTablePartitionParam) {
+ p.TabletSinkPartition = val
+}
+func (p *TDataStreamSink) SetTabletSinkLocation(val *descriptors.TOlapTableLocationParam) {
+ p.TabletSinkLocation = val
+}
+func (p *TDataStreamSink) SetTabletSinkTxnId(val *int64) {
+ p.TabletSinkTxnId = val
+}
+func (p *TDataStreamSink) SetTabletSinkTupleId(val *types.TTupleId) {
+ p.TabletSinkTupleId = val
+}
+func (p *TDataStreamSink) SetTabletSinkExprs(val []*exprs.TExpr) {
+ p.TabletSinkExprs = val
+}
var fieldIDToName_TDataStreamSink = map[int16]string{
- 1: "dest_node_id",
- 2: "output_partition",
- 3: "ignore_not_found",
- 4: "output_exprs",
- 5: "output_tuple_id",
- 6: "conjuncts",
- 7: "runtime_filters",
+ 1: "dest_node_id",
+ 2: "output_partition",
+ 3: "ignore_not_found",
+ 4: "output_exprs",
+ 5: "output_tuple_id",
+ 6: "conjuncts",
+ 7: "runtime_filters",
+ 8: "tablet_sink_schema",
+ 9: "tablet_sink_partition",
+ 10: "tablet_sink_location",
+ 11: "tablet_sink_txn_id",
+ 12: "tablet_sink_tuple_id",
+ 13: "tablet_sink_exprs",
}
func (p *TDataStreamSink) IsSetOutputPartition() bool {
@@ -2976,6 +3470,30 @@ func (p *TDataStreamSink) IsSetRuntimeFilters() bool {
return p.RuntimeFilters != nil
}
+func (p *TDataStreamSink) IsSetTabletSinkSchema() bool {
+ return p.TabletSinkSchema != nil
+}
+
+func (p *TDataStreamSink) IsSetTabletSinkPartition() bool {
+ return p.TabletSinkPartition != nil
+}
+
+func (p *TDataStreamSink) IsSetTabletSinkLocation() bool {
+ return p.TabletSinkLocation != nil
+}
+
+func (p *TDataStreamSink) IsSetTabletSinkTxnId() bool {
+ return p.TabletSinkTxnId != nil
+}
+
+func (p *TDataStreamSink) IsSetTabletSinkTupleId() bool {
+ return p.TabletSinkTupleId != nil
+}
+
+func (p *TDataStreamSink) IsSetTabletSinkExprs() bool {
+ return p.TabletSinkExprs != nil
+}
+
func (p *TDataStreamSink) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -3003,10 +3521,8 @@ func (p *TDataStreamSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetDestNodeId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRUCT {
@@ -3014,67 +3530,102 @@ func (p *TDataStreamSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetOutputPartition = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.LIST {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I32 {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.LIST {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.LIST {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 8:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField8(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 9:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField9(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 10:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField10(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 11:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField11(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 12:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField12(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 13:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField13(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -3111,97 +3662,182 @@ RequiredFieldNotSetError:
}
func (p *TDataStreamSink) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TPlanNodeId
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.DestNodeId = v
+ _field = v
}
+ p.DestNodeId = _field
return nil
}
-
func (p *TDataStreamSink) ReadField2(iprot thrift.TProtocol) error {
- p.OutputPartition = partitions.NewTDataPartition()
- if err := p.OutputPartition.Read(iprot); err != nil {
+ _field := partitions.NewTDataPartition()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.OutputPartition = _field
return nil
}
-
func (p *TDataStreamSink) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.IgnoreNotFound = &v
+ _field = &v
}
+ p.IgnoreNotFound = _field
return nil
}
-
func (p *TDataStreamSink) ReadField4(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.OutputExprs = make([]*exprs.TExpr, 0, size)
+ _field := make([]*exprs.TExpr, 0, size)
+ values := make([]exprs.TExpr, size)
for i := 0; i < size; i++ {
- _elem := exprs.NewTExpr()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.OutputExprs = append(p.OutputExprs, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.OutputExprs = _field
return nil
}
-
func (p *TDataStreamSink) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *types.TTupleId
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.OutputTupleId = &v
+ _field = &v
}
+ p.OutputTupleId = _field
return nil
}
-
func (p *TDataStreamSink) ReadField6(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.Conjuncts = make([]*exprs.TExpr, 0, size)
+ _field := make([]*exprs.TExpr, 0, size)
+ values := make([]exprs.TExpr, size)
for i := 0; i < size; i++ {
- _elem := exprs.NewTExpr()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.Conjuncts = append(p.Conjuncts, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Conjuncts = _field
return nil
}
-
func (p *TDataStreamSink) ReadField7(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.RuntimeFilters = make([]*plannodes.TRuntimeFilterDesc, 0, size)
+ _field := make([]*plannodes.TRuntimeFilterDesc, 0, size)
+ values := make([]plannodes.TRuntimeFilterDesc, size)
for i := 0; i < size; i++ {
- _elem := plannodes.NewTRuntimeFilterDesc()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.RuntimeFilters = append(p.RuntimeFilters, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.RuntimeFilters = _field
+ return nil
+}
+func (p *TDataStreamSink) ReadField8(iprot thrift.TProtocol) error {
+ _field := descriptors.NewTOlapTableSchemaParam()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.TabletSinkSchema = _field
+ return nil
+}
+func (p *TDataStreamSink) ReadField9(iprot thrift.TProtocol) error {
+ _field := descriptors.NewTOlapTablePartitionParam()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.TabletSinkPartition = _field
+ return nil
+}
+func (p *TDataStreamSink) ReadField10(iprot thrift.TProtocol) error {
+ _field := descriptors.NewTOlapTableLocationParam()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.TabletSinkLocation = _field
+ return nil
+}
+func (p *TDataStreamSink) ReadField11(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.TabletSinkTxnId = _field
+ return nil
+}
+func (p *TDataStreamSink) ReadField12(iprot thrift.TProtocol) error {
+
+ var _field *types.TTupleId
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.TabletSinkTupleId = _field
+ return nil
+}
+func (p *TDataStreamSink) ReadField13(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]*exprs.TExpr, 0, size)
+ values := make([]exprs.TExpr, size)
+ for i := 0; i < size; i++ {
+ _elem := &values[i]
+ _elem.InitDefault()
+
+ if err := _elem.Read(iprot); err != nil {
+ return err
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.TabletSinkExprs = _field
return nil
}
@@ -3239,7 +3875,30 @@ func (p *TDataStreamSink) Write(oprot thrift.TProtocol) (err error) {
fieldId = 7
goto WriteFieldError
}
-
+ if err = p.writeField8(oprot); err != nil {
+ fieldId = 8
+ goto WriteFieldError
+ }
+ if err = p.writeField9(oprot); err != nil {
+ fieldId = 9
+ goto WriteFieldError
+ }
+ if err = p.writeField10(oprot); err != nil {
+ fieldId = 10
+ goto WriteFieldError
+ }
+ if err = p.writeField11(oprot); err != nil {
+ fieldId = 11
+ goto WriteFieldError
+ }
+ if err = p.writeField12(oprot); err != nil {
+ fieldId = 12
+ goto WriteFieldError
+ }
+ if err = p.writeField13(oprot); err != nil {
+ fieldId = 13
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -3411,11 +4070,134 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 7 end error: ", p), err)
}
+func (p *TDataStreamSink) writeField8(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTabletSinkSchema() {
+ if err = oprot.WriteFieldBegin("tablet_sink_schema", thrift.STRUCT, 8); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.TabletSinkSchema.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 8 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 8 end error: ", p), err)
+}
+
+func (p *TDataStreamSink) writeField9(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTabletSinkPartition() {
+ if err = oprot.WriteFieldBegin("tablet_sink_partition", thrift.STRUCT, 9); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.TabletSinkPartition.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 9 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 9 end error: ", p), err)
+}
+
+func (p *TDataStreamSink) writeField10(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTabletSinkLocation() {
+ if err = oprot.WriteFieldBegin("tablet_sink_location", thrift.STRUCT, 10); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.TabletSinkLocation.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 10 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 10 end error: ", p), err)
+}
+
+func (p *TDataStreamSink) writeField11(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTabletSinkTxnId() {
+ if err = oprot.WriteFieldBegin("tablet_sink_txn_id", thrift.I64, 11); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.TabletSinkTxnId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 11 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 11 end error: ", p), err)
+}
+
+func (p *TDataStreamSink) writeField12(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTabletSinkTupleId() {
+ if err = oprot.WriteFieldBegin("tablet_sink_tuple_id", thrift.I32, 12); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.TabletSinkTupleId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 12 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 12 end error: ", p), err)
+}
+
+func (p *TDataStreamSink) writeField13(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTabletSinkExprs() {
+ if err = oprot.WriteFieldBegin("tablet_sink_exprs", thrift.LIST, 13); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.TabletSinkExprs)); err != nil {
+ return err
+ }
+ for _, v := range p.TabletSinkExprs {
+ if err := v.Write(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 13 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 13 end error: ", p), err)
+}
+
func (p *TDataStreamSink) String() string {
if p == nil {
return ""
}
return fmt.Sprintf("TDataStreamSink(%+v)", *p)
+
}
func (p *TDataStreamSink) DeepEqual(ano *TDataStreamSink) bool {
@@ -3445,6 +4227,24 @@ func (p *TDataStreamSink) DeepEqual(ano *TDataStreamSink) bool {
if !p.Field7DeepEqual(ano.RuntimeFilters) {
return false
}
+ if !p.Field8DeepEqual(ano.TabletSinkSchema) {
+ return false
+ }
+ if !p.Field9DeepEqual(ano.TabletSinkPartition) {
+ return false
+ }
+ if !p.Field10DeepEqual(ano.TabletSinkLocation) {
+ return false
+ }
+ if !p.Field11DeepEqual(ano.TabletSinkTxnId) {
+ return false
+ }
+ if !p.Field12DeepEqual(ano.TabletSinkTupleId) {
+ return false
+ }
+ if !p.Field13DeepEqual(ano.TabletSinkExprs) {
+ return false
+ }
return true
}
@@ -3525,6 +4325,64 @@ func (p *TDataStreamSink) Field7DeepEqual(src []*plannodes.TRuntimeFilterDesc) b
}
return true
}
+func (p *TDataStreamSink) Field8DeepEqual(src *descriptors.TOlapTableSchemaParam) bool {
+
+ if !p.TabletSinkSchema.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+func (p *TDataStreamSink) Field9DeepEqual(src *descriptors.TOlapTablePartitionParam) bool {
+
+ if !p.TabletSinkPartition.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+func (p *TDataStreamSink) Field10DeepEqual(src *descriptors.TOlapTableLocationParam) bool {
+
+ if !p.TabletSinkLocation.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+func (p *TDataStreamSink) Field11DeepEqual(src *int64) bool {
+
+ if p.TabletSinkTxnId == src {
+ return true
+ } else if p.TabletSinkTxnId == nil || src == nil {
+ return false
+ }
+ if *p.TabletSinkTxnId != *src {
+ return false
+ }
+ return true
+}
+func (p *TDataStreamSink) Field12DeepEqual(src *types.TTupleId) bool {
+
+ if p.TabletSinkTupleId == src {
+ return true
+ } else if p.TabletSinkTupleId == nil || src == nil {
+ return false
+ }
+ if *p.TabletSinkTupleId != *src {
+ return false
+ }
+ return true
+}
+func (p *TDataStreamSink) Field13DeepEqual(src []*exprs.TExpr) bool {
+
+ if len(p.TabletSinkExprs) != len(src) {
+ return false
+ }
+ for i, v := range p.TabletSinkExprs {
+ _src := src[i]
+ if !v.DeepEqual(_src) {
+ return false
+ }
+ }
+ return true
+}
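// Hypothetical usage sketch (not taken from the patch itself): building a
// TDataStreamSink that carries the new tablet-sink fields (ids 8-13) added
// above. Only constructors, exported fields, and Set/Get/IsSet helpers visible
// in this generated file (and in the referenced descriptors/partitions/types
// packages) are used; the function name is made up and the values are
// placeholders.
func exampleTabletSinkStream() *TDataStreamSink {
	sink := NewTDataStreamSink()
	sink.SetDestNodeId(1)                                 // required field 1
	sink.OutputPartition = partitions.NewTDataPartition() // required field 2
	sink.SetTabletSinkSchema(descriptors.NewTOlapTableSchemaParam())
	sink.SetTabletSinkPartition(descriptors.NewTOlapTablePartitionParam())
	sink.SetTabletSinkLocation(descriptors.NewTOlapTableLocationParam())
	txnID := int64(1001)
	sink.SetTabletSinkTxnId(&txnID)
	tupleID := types.TTupleId(0)
	sink.SetTabletSinkTupleId(&tupleID)
	if sink.IsSetTabletSinkTxnId() {
		_ = sink.GetTabletSinkTxnId() // 1001
	}
	return sink
}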
type TMultiCastDataStreamSink struct {
Sinks []*TDataStreamSink `thrift:"sinks,1,optional" frugal:"1,optional,list" json:"sinks,omitempty"`
@@ -3536,7 +4394,6 @@ func NewTMultiCastDataStreamSink() *TMultiCastDataStreamSink {
}
func (p *TMultiCastDataStreamSink) InitDefault() {
- *p = TMultiCastDataStreamSink{}
}
var TMultiCastDataStreamSink_Sinks_DEFAULT []*TDataStreamSink
@@ -3600,27 +4457,22 @@ func (p *TMultiCastDataStreamSink) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.LIST {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -3650,35 +4502,41 @@ func (p *TMultiCastDataStreamSink) ReadField1(iprot thrift.TProtocol) error {
if err != nil {
return err
}
- p.Sinks = make([]*TDataStreamSink, 0, size)
+ _field := make([]*TDataStreamSink, 0, size)
+ values := make([]TDataStreamSink, size)
for i := 0; i < size; i++ {
- _elem := NewTDataStreamSink()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.Sinks = append(p.Sinks, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Sinks = _field
return nil
}
-
func (p *TMultiCastDataStreamSink) ReadField2(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.Destinations = make([][]*TPlanFragmentDestination, 0, size)
+ _field := make([][]*TPlanFragmentDestination, 0, size)
for i := 0; i < size; i++ {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
_elem := make([]*TPlanFragmentDestination, 0, size)
+ values := make([]TPlanFragmentDestination, size)
for i := 0; i < size; i++ {
- _elem1 := NewTPlanFragmentDestination()
+ _elem1 := &values[i]
+ _elem1.InitDefault()
+
if err := _elem1.Read(iprot); err != nil {
return err
}
@@ -3689,11 +4547,12 @@ func (p *TMultiCastDataStreamSink) ReadField2(iprot thrift.TProtocol) error {
return err
}
- p.Destinations = append(p.Destinations, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.Destinations = _field
return nil
}
@@ -3711,7 +4570,6 @@ func (p *TMultiCastDataStreamSink) Write(oprot thrift.TProtocol) (err error) {
fieldId = 2
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -3797,6 +4655,7 @@ func (p *TMultiCastDataStreamSink) String() string {
return ""
}
return fmt.Sprintf("TMultiCastDataStreamSink(%+v)", *p)
+
}
func (p *TMultiCastDataStreamSink) DeepEqual(ano *TMultiCastDataStreamSink) bool {
@@ -3859,7 +4718,6 @@ func NewTFetchOption() *TFetchOption {
}
func (p *TFetchOption) InitDefault() {
- *p = TFetchOption{}
}
var TFetchOption_UseTwoPhaseFetch_DEFAULT bool
@@ -3957,47 +4815,38 @@ func (p *TFetchOption) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.LIST {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -4023,48 +4872,56 @@ ReadStructEndError:
}
func (p *TFetchOption) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.UseTwoPhaseFetch = &v
+ _field = &v
}
+ p.UseTwoPhaseFetch = _field
return nil
}
-
func (p *TFetchOption) ReadField2(iprot thrift.TProtocol) error {
- p.NodesInfo = descriptors.NewTPaloNodesInfo()
- if err := p.NodesInfo.Read(iprot); err != nil {
+ _field := descriptors.NewTPaloNodesInfo()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.NodesInfo = _field
return nil
}
-
func (p *TFetchOption) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.FetchRowStore = &v
+ _field = &v
}
+ p.FetchRowStore = _field
return nil
}
-
func (p *TFetchOption) ReadField4(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.ColumnDesc = make([]*descriptors.TColumn, 0, size)
+ _field := make([]*descriptors.TColumn, 0, size)
+ values := make([]descriptors.TColumn, size)
for i := 0; i < size; i++ {
- _elem := descriptors.NewTColumn()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.ColumnDesc = append(p.ColumnDesc, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.ColumnDesc = _field
return nil
}
@@ -4090,7 +4947,6 @@ func (p *TFetchOption) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -4198,6 +5054,7 @@ func (p *TFetchOption) String() string {
return ""
}
return fmt.Sprintf("TFetchOption(%+v)", *p)
+
}
func (p *TFetchOption) DeepEqual(ano *TFetchOption) bool {
@@ -4277,7 +5134,6 @@ func NewTResultSink() *TResultSink {
}
func (p *TResultSink) InitDefault() {
- *p = TResultSink{}
}
var TResultSink_Type_DEFAULT TResultSinkType
@@ -4358,37 +5214,30 @@ func (p *TResultSink) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -4414,28 +5263,31 @@ ReadStructEndError:
}
func (p *TResultSink) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *TResultSinkType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := TResultSinkType(v)
- p.Type = &tmp
+ _field = &tmp
}
+ p.Type = _field
return nil
}
-
func (p *TResultSink) ReadField2(iprot thrift.TProtocol) error {
- p.FileOptions = NewTResultFileSinkOptions()
- if err := p.FileOptions.Read(iprot); err != nil {
+ _field := NewTResultFileSinkOptions()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.FileOptions = _field
return nil
}
-
func (p *TResultSink) ReadField3(iprot thrift.TProtocol) error {
- p.FetchOption = NewTFetchOption()
- if err := p.FetchOption.Read(iprot); err != nil {
+ _field := NewTFetchOption()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.FetchOption = _field
return nil
}
@@ -4457,7 +5309,6 @@ func (p *TResultSink) Write(oprot thrift.TProtocol) (err error) {
fieldId = 3
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -4538,6 +5389,7 @@ func (p *TResultSink) String() string {
return ""
}
return fmt.Sprintf("TResultSink(%+v)", *p)
+
}
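// Hypothetical usage sketch (not taken from the patch itself): wiring the
// structs in this file together. A TResultSink can carry the file options and
// a TFetchOption; only constructors and exported fields visible in this
// generated file are used, and TResultSinkType(0) is a placeholder because
// the enum constants are not part of this hunk.
func exampleResultSink() *TResultSink {
	sink := NewTResultSink()
	sinkType := TResultSinkType(0) // placeholder enum value
	sink.Type = &sinkType
	sink.FileOptions = NewTResultFileSinkOptions()
	fetch := NewTFetchOption()
	useTwoPhase := true
	fetch.UseTwoPhaseFetch = &useTwoPhase
	sink.FetchOption = fetch
	return sink
}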
func (p *TResultSink) DeepEqual(ano *TResultSink) bool {
@@ -4599,7 +5451,6 @@ func NewTResultFileSink() *TResultFileSink {
}
func (p *TResultFileSink) InitDefault() {
- *p = TResultFileSink{}
}
var TResultFileSink_FileOptions_DEFAULT *TResultFileSinkOptions
@@ -4731,67 +5582,54 @@ func (p *TResultFileSink) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I32 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I32 {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.STRING {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.STRING {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -4817,56 +5655,67 @@ ReadStructEndError:
}
func (p *TResultFileSink) ReadField1(iprot thrift.TProtocol) error {
- p.FileOptions = NewTResultFileSinkOptions()
- if err := p.FileOptions.Read(iprot); err != nil {
+ _field := NewTResultFileSinkOptions()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.FileOptions = _field
return nil
}
-
func (p *TResultFileSink) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *types.TStorageBackendType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := types.TStorageBackendType(v)
- p.StorageBackendType = &tmp
+ _field = &tmp
}
+ p.StorageBackendType = _field
return nil
}
-
func (p *TResultFileSink) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *types.TPlanNodeId
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.DestNodeId = &v
+ _field = &v
}
+ p.DestNodeId = _field
return nil
}
-
func (p *TResultFileSink) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *types.TTupleId
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.OutputTupleId = &v
+ _field = &v
}
+ p.OutputTupleId = _field
return nil
}
-
func (p *TResultFileSink) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Header = &v
+ _field = &v
}
+ p.Header = _field
return nil
}
-
func (p *TResultFileSink) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.HeaderType = &v
+ _field = &v
}
+ p.HeaderType = _field
return nil
}
@@ -4900,7 +5749,6 @@ func (p *TResultFileSink) Write(oprot thrift.TProtocol) (err error) {
fieldId = 6
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -5038,6 +5886,7 @@ func (p *TResultFileSink) String() string {
return ""
}
return fmt.Sprintf("TResultFileSink(%+v)", *p)
+
}
func (p *TResultFileSink) DeepEqual(ano *TResultFileSink) bool {
@@ -5150,7 +5999,6 @@ func NewTMysqlTableSink() *TMysqlTableSink {
}
func (p *TMysqlTableSink) InitDefault() {
- *p = TMysqlTableSink{}
}
func (p *TMysqlTableSink) GetHost() (v string) {
@@ -5244,10 +6092,8 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetHost = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I32 {
@@ -5255,10 +6101,8 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetPort = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRING {
@@ -5266,10 +6110,8 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetUser = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.STRING {
@@ -5277,10 +6119,8 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetPasswd = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.STRING {
@@ -5288,10 +6128,8 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetDb = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.STRING {
@@ -5299,10 +6137,8 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTable = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.STRING {
@@ -5310,17 +6146,14 @@ func (p *TMysqlTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetCharset = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -5382,65 +6215,80 @@ RequiredFieldNotSetError:
}
func (p *TMysqlTableSink) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Host = v
+ _field = v
}
+ p.Host = _field
return nil
}
-
func (p *TMysqlTableSink) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.Port = v
+ _field = v
}
+ p.Port = _field
return nil
}
-
func (p *TMysqlTableSink) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.User = v
+ _field = v
}
+ p.User = _field
return nil
}
-
func (p *TMysqlTableSink) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Passwd = v
+ _field = v
}
+ p.Passwd = _field
return nil
}
-
func (p *TMysqlTableSink) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Db = v
+ _field = v
}
+ p.Db = _field
return nil
}
-
func (p *TMysqlTableSink) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Table = v
+ _field = v
}
+ p.Table = _field
return nil
}
-
func (p *TMysqlTableSink) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Charset = v
+ _field = v
}
+ p.Charset = _field
return nil
}
@@ -5478,7 +6326,6 @@ func (p *TMysqlTableSink) Write(oprot thrift.TProtocol) (err error) {
fieldId = 7
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -5621,6 +6468,7 @@ func (p *TMysqlTableSink) String() string {
return ""
}
return fmt.Sprintf("TMysqlTableSink(%+v)", *p)
+
}
func (p *TMysqlTableSink) DeepEqual(ano *TMysqlTableSink) bool {
@@ -5714,7 +6562,6 @@ func NewTOdbcTableSink() *TOdbcTableSink {
}
func (p *TOdbcTableSink) InitDefault() {
- *p = TOdbcTableSink{}
}
var TOdbcTableSink_ConnectString_DEFAULT string
@@ -5795,37 +6642,30 @@ func (p *TOdbcTableSink) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRING {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -5851,29 +6691,36 @@ ReadStructEndError:
}
func (p *TOdbcTableSink) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.ConnectString = &v
+ _field = &v
}
+ p.ConnectString = _field
return nil
}
-
func (p *TOdbcTableSink) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Table = &v
+ _field = &v
}
+ p.Table = _field
return nil
}
-
func (p *TOdbcTableSink) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.UseTransaction = &v
+ _field = &v
}
+ p.UseTransaction = _field
return nil
}
@@ -5895,7 +6742,6 @@ func (p *TOdbcTableSink) Write(oprot thrift.TProtocol) (err error) {
fieldId = 3
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -5976,6 +6822,7 @@ func (p *TOdbcTableSink) String() string {
return ""
}
return fmt.Sprintf("TOdbcTableSink(%+v)", *p)
+
}
func (p *TOdbcTableSink) DeepEqual(ano *TOdbcTableSink) bool {
@@ -6045,7 +6892,6 @@ func NewTJdbcTableSink() *TJdbcTableSink {
}
func (p *TJdbcTableSink) InitDefault() {
- *p = TJdbcTableSink{}
}
var TJdbcTableSink_JdbcTable_DEFAULT *descriptors.TJdbcTable
@@ -6143,47 +6989,38 @@ func (p *TJdbcTableSink) Read(iprot thrift.TProtocol) (err error) {
if err = p.ReadField1(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField2(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I32 {
if err = p.ReadField3(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.STRING {
if err = p.ReadField4(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -6209,38 +7046,45 @@ ReadStructEndError:
}
func (p *TJdbcTableSink) ReadField1(iprot thrift.TProtocol) error {
- p.JdbcTable = descriptors.NewTJdbcTable()
- if err := p.JdbcTable.Read(iprot); err != nil {
+ _field := descriptors.NewTJdbcTable()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.JdbcTable = _field
return nil
}
-
func (p *TJdbcTableSink) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.UseTransaction = &v
+ _field = &v
}
+ p.UseTransaction = _field
return nil
}
-
func (p *TJdbcTableSink) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *types.TOdbcTableType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
tmp := types.TOdbcTableType(v)
- p.TableType = &tmp
+ _field = &tmp
}
+ p.TableType = _field
return nil
}
-
func (p *TJdbcTableSink) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.InsertSql = &v
+ _field = &v
}
+ p.InsertSql = _field
return nil
}
@@ -6266,7 +7110,6 @@ func (p *TJdbcTableSink) Write(oprot thrift.TProtocol) (err error) {
fieldId = 4
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -6366,6 +7209,7 @@ func (p *TJdbcTableSink) String() string {
return ""
}
return fmt.Sprintf("TJdbcTableSink(%+v)", *p)
+
}
func (p *TJdbcTableSink) DeepEqual(ano *TJdbcTableSink) bool {
@@ -6448,7 +7292,6 @@ func NewTExportSink() *TExportSink {
}
func (p *TExportSink) InitDefault() {
- *p = TExportSink{}
}
func (p *TExportSink) GetFileType() (v types.TFileType) {
@@ -6566,10 +7409,8 @@ func (p *TExportSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetFileType = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.STRING {
@@ -6577,10 +7418,8 @@ func (p *TExportSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetExportPath = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.STRING {
@@ -6588,10 +7427,8 @@ func (p *TExportSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetColumnSeparator = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.STRING {
@@ -6599,47 +7436,38 @@ func (p *TExportSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetLineDelimiter = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.LIST {
if err = p.ReadField5(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.MAP {
if err = p.ReadField6(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.STRING {
if err = p.ReadField7(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
default:
if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
}
-
if err = iprot.ReadFieldEnd(); err != nil {
goto ReadFieldEndError
}
@@ -6686,67 +7514,78 @@ RequiredFieldNotSetError:
}
func (p *TExportSink) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field types.TFileType
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.FileType = types.TFileType(v)
+ _field = types.TFileType(v)
}
+ p.FileType = _field
return nil
}
-
func (p *TExportSink) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.ExportPath = v
+ _field = v
}
+ p.ExportPath = _field
return nil
}
-
func (p *TExportSink) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.ColumnSeparator = v
+ _field = v
}
+ p.ColumnSeparator = _field
return nil
}
-
func (p *TExportSink) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.LineDelimiter = v
+ _field = v
}
+ p.LineDelimiter = _field
return nil
}
-
func (p *TExportSink) ReadField5(iprot thrift.TProtocol) error {
_, size, err := iprot.ReadListBegin()
if err != nil {
return err
}
- p.BrokerAddresses = make([]*types.TNetworkAddress, 0, size)
+ _field := make([]*types.TNetworkAddress, 0, size)
+ values := make([]types.TNetworkAddress, size)
for i := 0; i < size; i++ {
- _elem := types.NewTNetworkAddress()
+ _elem := &values[i]
+ _elem.InitDefault()
+
if err := _elem.Read(iprot); err != nil {
return err
}
- p.BrokerAddresses = append(p.BrokerAddresses, _elem)
+ _field = append(_field, _elem)
}
if err := iprot.ReadListEnd(); err != nil {
return err
}
+ p.BrokerAddresses = _field
return nil
}
-
func (p *TExportSink) ReadField6(iprot thrift.TProtocol) error {
_, _, size, err := iprot.ReadMapBegin()
if err != nil {
return err
}
- p.Properties = make(map[string]string, size)
+ _field := make(map[string]string, size)
for i := 0; i < size; i++ {
var _key string
if v, err := iprot.ReadString(); err != nil {
@@ -6762,20 +7601,23 @@ func (p *TExportSink) ReadField6(iprot thrift.TProtocol) error {
_val = v
}
- p.Properties[_key] = _val
+ _field[_key] = _val
}
if err := iprot.ReadMapEnd(); err != nil {
return err
}
+ p.Properties = _field
return nil
}
-
func (p *TExportSink) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.Header = &v
+ _field = &v
}
+ p.Header = _field
return nil
}
@@ -6813,7 +7655,6 @@ func (p *TExportSink) Write(oprot thrift.TProtocol) (err error) {
fieldId = 7
goto WriteFieldError
}
-
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -6936,11 +7777,9 @@ func (p *TExportSink) writeField6(oprot thrift.TProtocol) (err error) {
return err
}
for k, v := range p.Properties {
-
if err := oprot.WriteString(k); err != nil {
return err
}
-
if err := oprot.WriteString(v); err != nil {
return err
}
@@ -6983,6 +7822,7 @@ func (p *TExportSink) String() string {
return ""
}
return fmt.Sprintf("TExportSink(%+v)", *p)
+
}
func (p *TExportSink) DeepEqual(ano *TExportSink) bool {
@@ -7103,6 +7943,10 @@ type TOlapTableSink struct {
SlaveLocation *descriptors.TOlapTableLocationParam `thrift:"slave_location,18,optional" frugal:"18,optional,descriptors.TOlapTableLocationParam" json:"slave_location,omitempty"`
TxnTimeoutS *int64 `thrift:"txn_timeout_s,19,optional" frugal:"19,optional,i64" json:"txn_timeout_s,omitempty"`
WriteFileCache *bool `thrift:"write_file_cache,20,optional" frugal:"20,optional,bool" json:"write_file_cache,omitempty"`
+ BaseSchemaVersion *int64 `thrift:"base_schema_version,21,optional" frugal:"21,optional,i64" json:"base_schema_version,omitempty"`
+ GroupCommitMode *TGroupCommitMode `thrift:"group_commit_mode,22,optional" frugal:"22,optional,TGroupCommitMode" json:"group_commit_mode,omitempty"`
+ MaxFilterRatio *float64 `thrift:"max_filter_ratio,23,optional" frugal:"23,optional,double" json:"max_filter_ratio,omitempty"`
+ StorageVaultId *string `thrift:"storage_vault_id,24,optional" frugal:"24,optional,string" json:"storage_vault_id,omitempty"`
}
func NewTOlapTableSink() *TOlapTableSink {
@@ -7110,7 +7954,6 @@ func NewTOlapTableSink() *TOlapTableSink {
}
func (p *TOlapTableSink) InitDefault() {
- *p = TOlapTableSink{}
}
var TOlapTableSink_LoadId_DEFAULT *types.TUniqueId
@@ -7262,6 +8105,42 @@ func (p *TOlapTableSink) GetWriteFileCache() (v bool) {
}
return *p.WriteFileCache
}
+
+var TOlapTableSink_BaseSchemaVersion_DEFAULT int64
+
+func (p *TOlapTableSink) GetBaseSchemaVersion() (v int64) {
+ if !p.IsSetBaseSchemaVersion() {
+ return TOlapTableSink_BaseSchemaVersion_DEFAULT
+ }
+ return *p.BaseSchemaVersion
+}
+
+var TOlapTableSink_GroupCommitMode_DEFAULT TGroupCommitMode
+
+func (p *TOlapTableSink) GetGroupCommitMode() (v TGroupCommitMode) {
+ if !p.IsSetGroupCommitMode() {
+ return TOlapTableSink_GroupCommitMode_DEFAULT
+ }
+ return *p.GroupCommitMode
+}
+
+var TOlapTableSink_MaxFilterRatio_DEFAULT float64
+
+func (p *TOlapTableSink) GetMaxFilterRatio() (v float64) {
+ if !p.IsSetMaxFilterRatio() {
+ return TOlapTableSink_MaxFilterRatio_DEFAULT
+ }
+ return *p.MaxFilterRatio
+}
+
+var TOlapTableSink_StorageVaultId_DEFAULT string
+
+func (p *TOlapTableSink) GetStorageVaultId() (v string) {
+ if !p.IsSetStorageVaultId() {
+ return TOlapTableSink_StorageVaultId_DEFAULT
+ }
+ return *p.StorageVaultId
+}
func (p *TOlapTableSink) SetLoadId(val *types.TUniqueId) {
p.LoadId = val
}
@@ -7322,6 +8201,18 @@ func (p *TOlapTableSink) SetTxnTimeoutS(val *int64) {
func (p *TOlapTableSink) SetWriteFileCache(val *bool) {
p.WriteFileCache = val
}
+func (p *TOlapTableSink) SetBaseSchemaVersion(val *int64) {
+ p.BaseSchemaVersion = val
+}
+func (p *TOlapTableSink) SetGroupCommitMode(val *TGroupCommitMode) {
+ p.GroupCommitMode = val
+}
+func (p *TOlapTableSink) SetMaxFilterRatio(val *float64) {
+ p.MaxFilterRatio = val
+}
+func (p *TOlapTableSink) SetStorageVaultId(val *string) {
+ p.StorageVaultId = val
+}
var fieldIDToName_TOlapTableSink = map[int16]string{
1: "load_id",
@@ -7344,6 +8235,10 @@ var fieldIDToName_TOlapTableSink = map[int16]string{
18: "slave_location",
19: "txn_timeout_s",
20: "write_file_cache",
+ 21: "base_schema_version",
+ 22: "group_commit_mode",
+ 23: "max_filter_ratio",
+ 24: "storage_vault_id",
}
func (p *TOlapTableSink) IsSetLoadId() bool {
@@ -7402,6 +8297,22 @@ func (p *TOlapTableSink) IsSetWriteFileCache() bool {
return p.WriteFileCache != nil
}
+func (p *TOlapTableSink) IsSetBaseSchemaVersion() bool {
+ return p.BaseSchemaVersion != nil
+}
+
+func (p *TOlapTableSink) IsSetGroupCommitMode() bool {
+ return p.GroupCommitMode != nil
+}
+
+func (p *TOlapTableSink) IsSetMaxFilterRatio() bool {
+ return p.MaxFilterRatio != nil
+}
+
+func (p *TOlapTableSink) IsSetStorageVaultId() bool {
+ return p.StorageVaultId != nil
+}
+
func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) {
var fieldTypeId thrift.TType
@@ -7438,10 +8349,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetLoadId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 2:
if fieldTypeId == thrift.I64 {
@@ -7449,10 +8358,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTxnId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 3:
if fieldTypeId == thrift.I64 {
@@ -7460,10 +8367,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetDbId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 4:
if fieldTypeId == thrift.I64 {
@@ -7471,10 +8376,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTableId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 5:
if fieldTypeId == thrift.I32 {
@@ -7482,10 +8385,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetTupleId = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 6:
if fieldTypeId == thrift.I32 {
@@ -7493,10 +8394,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetNumReplicas = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 7:
if fieldTypeId == thrift.BOOL {
@@ -7504,30 +8403,24 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetNeedGenRollup = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 8:
if fieldTypeId == thrift.STRING {
if err = p.ReadField8(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 9:
if fieldTypeId == thrift.STRING {
if err = p.ReadField9(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 10:
if fieldTypeId == thrift.STRUCT {
@@ -7535,10 +8428,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetSchema = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 11:
if fieldTypeId == thrift.STRUCT {
@@ -7546,10 +8437,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetPartition = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 12:
if fieldTypeId == thrift.STRUCT {
@@ -7557,10 +8446,8 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetLocation = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 13:
if fieldTypeId == thrift.STRUCT {
@@ -7568,91 +8455,106 @@ func (p *TOlapTableSink) Read(iprot thrift.TProtocol) (err error) {
goto ReadFieldError
}
issetNodesInfo = true
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 14:
if fieldTypeId == thrift.I64 {
if err = p.ReadField14(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 15:
if fieldTypeId == thrift.I32 {
if err = p.ReadField15(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 16:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField16(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 17:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField17(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 18:
if fieldTypeId == thrift.STRUCT {
if err = p.ReadField18(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 19:
if fieldTypeId == thrift.I64 {
if err = p.ReadField19(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
- }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
case 20:
if fieldTypeId == thrift.BOOL {
if err = p.ReadField20(iprot); err != nil {
goto ReadFieldError
}
- } else {
- if err = iprot.Skip(fieldTypeId); err != nil {
- goto SkipFieldError
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 21:
+ if fieldTypeId == thrift.I64 {
+ if err = p.ReadField21(iprot); err != nil {
+ goto ReadFieldError
}
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
}
- default:
- if err = iprot.Skip(fieldTypeId); err != nil {
+ case 22:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField22(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
goto SkipFieldError
}
- }
-
- if err = iprot.ReadFieldEnd(); err != nil {
- goto ReadFieldEndError
- }
- }
+ case 23:
+ if fieldTypeId == thrift.DOUBLE {
+ if err = p.ReadField23(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 24:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField24(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
if err = iprot.ReadStructEnd(); err != nil {
goto ReadStructEndError
}
@@ -7730,176 +8632,250 @@ RequiredFieldNotSetError:
}
func (p *TOlapTableSink) ReadField1(iprot thrift.TProtocol) error {
- p.LoadId = types.NewTUniqueId()
- if err := p.LoadId.Read(iprot); err != nil {
+ _field := types.NewTUniqueId()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.LoadId = _field
return nil
}
-
func (p *TOlapTableSink) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TxnId = v
+ _field = v
}
+ p.TxnId = _field
return nil
}
-
func (p *TOlapTableSink) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.DbId = v
+ _field = v
}
+ p.DbId = _field
return nil
}
-
func (p *TOlapTableSink) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TableId = v
+ _field = v
}
+ p.TableId = _field
return nil
}
-
func (p *TOlapTableSink) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.TupleId = v
+ _field = v
}
+ p.TupleId = _field
return nil
}
-
func (p *TOlapTableSink) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.NumReplicas = v
+ _field = v
}
+ p.NumReplicas = _field
return nil
}
-
func (p *TOlapTableSink) ReadField7(iprot thrift.TProtocol) error {
+
+ var _field bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.NeedGenRollup = v
+ _field = v
}
+ p.NeedGenRollup = _field
return nil
}
-
func (p *TOlapTableSink) ReadField8(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.DbName = &v
+ _field = &v
}
+ p.DbName = _field
return nil
}
-
func (p *TOlapTableSink) ReadField9(iprot thrift.TProtocol) error {
+
+ var _field *string
if v, err := iprot.ReadString(); err != nil {
return err
} else {
- p.TableName = &v
+ _field = &v
}
+ p.TableName = _field
return nil
}
-
func (p *TOlapTableSink) ReadField10(iprot thrift.TProtocol) error {
- p.Schema = descriptors.NewTOlapTableSchemaParam()
- if err := p.Schema.Read(iprot); err != nil {
+ _field := descriptors.NewTOlapTableSchemaParam()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Schema = _field
return nil
}
-
func (p *TOlapTableSink) ReadField11(iprot thrift.TProtocol) error {
- p.Partition = descriptors.NewTOlapTablePartitionParam()
- if err := p.Partition.Read(iprot); err != nil {
+ _field := descriptors.NewTOlapTablePartitionParam()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Partition = _field
return nil
}
-
func (p *TOlapTableSink) ReadField12(iprot thrift.TProtocol) error {
- p.Location = descriptors.NewTOlapTableLocationParam()
- if err := p.Location.Read(iprot); err != nil {
+ _field := descriptors.NewTOlapTableLocationParam()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.Location = _field
return nil
}
-
func (p *TOlapTableSink) ReadField13(iprot thrift.TProtocol) error {
- p.NodesInfo = descriptors.NewTPaloNodesInfo()
- if err := p.NodesInfo.Read(iprot); err != nil {
+ _field := descriptors.NewTPaloNodesInfo()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.NodesInfo = _field
return nil
}
-
func (p *TOlapTableSink) ReadField14(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.LoadChannelTimeoutS = &v
+ _field = &v
}
+ p.LoadChannelTimeoutS = _field
return nil
}
-
func (p *TOlapTableSink) ReadField15(iprot thrift.TProtocol) error {
+
+ var _field *int32
if v, err := iprot.ReadI32(); err != nil {
return err
} else {
- p.SendBatchParallelism = &v
+ _field = &v
}
+ p.SendBatchParallelism = _field
return nil
}
-
func (p *TOlapTableSink) ReadField16(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.LoadToSingleTablet = &v
+ _field = &v
}
+ p.LoadToSingleTablet = _field
return nil
}
-
func (p *TOlapTableSink) ReadField17(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.WriteSingleReplica = &v
+ _field = &v
}
+ p.WriteSingleReplica = _field
return nil
}
-
func (p *TOlapTableSink) ReadField18(iprot thrift.TProtocol) error {
- p.SlaveLocation = descriptors.NewTOlapTableLocationParam()
- if err := p.SlaveLocation.Read(iprot); err != nil {
+ _field := descriptors.NewTOlapTableLocationParam()
+ if err := _field.Read(iprot); err != nil {
return err
}
+ p.SlaveLocation = _field
return nil
}
-
func (p *TOlapTableSink) ReadField19(iprot thrift.TProtocol) error {
+
+ var _field *int64
if v, err := iprot.ReadI64(); err != nil {
return err
} else {
- p.TxnTimeoutS = &v
+ _field = &v
}
+ p.TxnTimeoutS = _field
return nil
}
-
func (p *TOlapTableSink) ReadField20(iprot thrift.TProtocol) error {
+
+ var _field *bool
if v, err := iprot.ReadBool(); err != nil {
return err
} else {
- p.WriteFileCache = &v
+ _field = &v
+ }
+ p.WriteFileCache = _field
+ return nil
+}
+func (p *TOlapTableSink) ReadField21(iprot thrift.TProtocol) error {
+
+ var _field *int64
+ if v, err := iprot.ReadI64(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.BaseSchemaVersion = _field
+ return nil
+}
+func (p *TOlapTableSink) ReadField22(iprot thrift.TProtocol) error {
+
+ var _field *TGroupCommitMode
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ tmp := TGroupCommitMode(v)
+ _field = &tmp
+ }
+ p.GroupCommitMode = _field
+ return nil
+}
+func (p *TOlapTableSink) ReadField23(iprot thrift.TProtocol) error {
+
+ var _field *float64
+ if v, err := iprot.ReadDouble(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.MaxFilterRatio = _field
+ return nil
+}
+func (p *TOlapTableSink) ReadField24(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
}
+ p.StorageVaultId = _field
return nil
}
@@ -7989,7 +8965,22 @@ func (p *TOlapTableSink) Write(oprot thrift.TProtocol) (err error) {
fieldId = 20
goto WriteFieldError
}
-
+ if err = p.writeField21(oprot); err != nil {
+ fieldId = 21
+ goto WriteFieldError
+ }
+ if err = p.writeField22(oprot); err != nil {
+ fieldId = 22
+ goto WriteFieldError
+ }
+ if err = p.writeField23(oprot); err != nil {
+ fieldId = 23
+ goto WriteFieldError
+ }
+ if err = p.writeField24(oprot); err != nil {
+ fieldId = 24
+ goto WriteFieldError
+ }
}
if err = oprot.WriteFieldStop(); err != nil {
goto WriteFieldStopError
@@ -8366,11 +9357,88 @@ WriteFieldEndError:
return thrift.PrependError(fmt.Sprintf("%T write field 20 end error: ", p), err)
}
+func (p *TOlapTableSink) writeField21(oprot thrift.TProtocol) (err error) {
+ if p.IsSetBaseSchemaVersion() {
+ if err = oprot.WriteFieldBegin("base_schema_version", thrift.I64, 21); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI64(*p.BaseSchemaVersion); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 21 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 21 end error: ", p), err)
+}
+
+func (p *TOlapTableSink) writeField22(oprot thrift.TProtocol) (err error) {
+ if p.IsSetGroupCommitMode() {
+ if err = oprot.WriteFieldBegin("group_commit_mode", thrift.I32, 22); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(*p.GroupCommitMode)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 22 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 22 end error: ", p), err)
+}
+
+func (p *TOlapTableSink) writeField23(oprot thrift.TProtocol) (err error) {
+ if p.IsSetMaxFilterRatio() {
+ if err = oprot.WriteFieldBegin("max_filter_ratio", thrift.DOUBLE, 23); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteDouble(*p.MaxFilterRatio); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 23 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 23 end error: ", p), err)
+}
+
+func (p *TOlapTableSink) writeField24(oprot thrift.TProtocol) (err error) {
+ if p.IsSetStorageVaultId() {
+ if err = oprot.WriteFieldBegin("storage_vault_id", thrift.STRING, 24); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.StorageVaultId); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 24 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 24 end error: ", p), err)
+}
+
func (p *TOlapTableSink) String() string {
if p == nil {
return ""
}
return fmt.Sprintf("TOlapTableSink(%+v)", *p)
+
}
func (p *TOlapTableSink) DeepEqual(ano *TOlapTableSink) bool {
@@ -8439,6 +9507,18 @@ func (p *TOlapTableSink) DeepEqual(ano *TOlapTableSink) bool {
if !p.Field20DeepEqual(ano.WriteFileCache) {
return false
}
+ if !p.Field21DeepEqual(ano.BaseSchemaVersion) {
+ return false
+ }
+ if !p.Field22DeepEqual(ano.GroupCommitMode) {
+ return false
+ }
+ if !p.Field23DeepEqual(ano.MaxFilterRatio) {
+ return false
+ }
+ if !p.Field24DeepEqual(ano.StorageVaultId) {
+ return false
+ }
return true
}
@@ -8622,122 +9702,6783 @@ func (p *TOlapTableSink) Field20DeepEqual(src *bool) bool {
}
return true
}
+func (p *TOlapTableSink) Field21DeepEqual(src *int64) bool {
-type TDataSink struct {
- Type TDataSinkType `thrift:"type,1,required" frugal:"1,required,TDataSinkType" json:"type"`
- StreamSink *TDataStreamSink `thrift:"stream_sink,2,optional" frugal:"2,optional,TDataStreamSink" json:"stream_sink,omitempty"`
- ResultSink *TResultSink `thrift:"result_sink,3,optional" frugal:"3,optional,TResultSink" json:"result_sink,omitempty"`
- MysqlTableSink *TMysqlTableSink `thrift:"mysql_table_sink,5,optional" frugal:"5,optional,TMysqlTableSink" json:"mysql_table_sink,omitempty"`
- ExportSink *TExportSink `thrift:"export_sink,6,optional" frugal:"6,optional,TExportSink" json:"export_sink,omitempty"`
- OlapTableSink *TOlapTableSink `thrift:"olap_table_sink,7,optional" frugal:"7,optional,TOlapTableSink" json:"olap_table_sink,omitempty"`
- MemoryScratchSink *TMemoryScratchSink `thrift:"memory_scratch_sink,8,optional" frugal:"8,optional,TMemoryScratchSink" json:"memory_scratch_sink,omitempty"`
- OdbcTableSink *TOdbcTableSink `thrift:"odbc_table_sink,9,optional" frugal:"9,optional,TOdbcTableSink" json:"odbc_table_sink,omitempty"`
- ResultFileSink *TResultFileSink `thrift:"result_file_sink,10,optional" frugal:"10,optional,TResultFileSink" json:"result_file_sink,omitempty"`
- JdbcTableSink *TJdbcTableSink `thrift:"jdbc_table_sink,11,optional" frugal:"11,optional,TJdbcTableSink" json:"jdbc_table_sink,omitempty"`
- MultiCastStreamSink *TMultiCastDataStreamSink `thrift:"multi_cast_stream_sink,12,optional" frugal:"12,optional,TMultiCastDataStreamSink" json:"multi_cast_stream_sink,omitempty"`
+ if p.BaseSchemaVersion == src {
+ return true
+ } else if p.BaseSchemaVersion == nil || src == nil {
+ return false
+ }
+ if *p.BaseSchemaVersion != *src {
+ return false
+ }
+ return true
}
+func (p *TOlapTableSink) Field22DeepEqual(src *TGroupCommitMode) bool {
-func NewTDataSink() *TDataSink {
- return &TDataSink{}
+ if p.GroupCommitMode == src {
+ return true
+ } else if p.GroupCommitMode == nil || src == nil {
+ return false
+ }
+ if *p.GroupCommitMode != *src {
+ return false
+ }
+ return true
}
+func (p *TOlapTableSink) Field23DeepEqual(src *float64) bool {
-func (p *TDataSink) InitDefault() {
- *p = TDataSink{}
+ if p.MaxFilterRatio == src {
+ return true
+ } else if p.MaxFilterRatio == nil || src == nil {
+ return false
+ }
+ if *p.MaxFilterRatio != *src {
+ return false
+ }
+ return true
}
+func (p *TOlapTableSink) Field24DeepEqual(src *string) bool {
-func (p *TDataSink) GetType() (v TDataSinkType) {
- return p.Type
+ if p.StorageVaultId == src {
+ return true
+ } else if p.StorageVaultId == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.StorageVaultId, *src) != 0 {
+ return false
+ }
+ return true
}
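Editor's sketch (not part of the generated diff), showing how a caller might populate the four optional TOlapTableSink fields added in this change via the generated setters; the helper name and concrete values are illustrative placeholders, not taken from the source.

func exampleSetNewOlapTableSinkFields() *TOlapTableSink {
	sink := NewTOlapTableSink()
	baseSchemaVersion := int64(1) // placeholder schema version
	mode := TGroupCommitMode(0)   // placeholder enum value
	maxFilterRatio := 0.1         // placeholder ratio
	vaultID := "example-vault-id" // placeholder storage vault id
	sink.SetBaseSchemaVersion(&baseSchemaVersion)
	sink.SetGroupCommitMode(&mode)
	sink.SetMaxFilterRatio(&maxFilterRatio)
	sink.SetStorageVaultId(&vaultID)
	// writeField21-writeField24 serialize these only when the matching
	// IsSet* check reports a non-nil pointer.
	return sink
}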
-var TDataSink_StreamSink_DEFAULT *TDataStreamSink
-
-func (p *TDataSink) GetStreamSink() (v *TDataStreamSink) {
- if !p.IsSetStreamSink() {
- return TDataSink_StreamSink_DEFAULT
- }
- return p.StreamSink
+type THiveLocationParams struct {
+ WritePath *string `thrift:"write_path,1,optional" frugal:"1,optional,string" json:"write_path,omitempty"`
+ TargetPath *string `thrift:"target_path,2,optional" frugal:"2,optional,string" json:"target_path,omitempty"`
+ FileType *types.TFileType `thrift:"file_type,3,optional" frugal:"3,optional,TFileType" json:"file_type,omitempty"`
+ OriginalWritePath *string `thrift:"original_write_path,4,optional" frugal:"4,optional,string" json:"original_write_path,omitempty"`
}
-var TDataSink_ResultSink_DEFAULT *TResultSink
+func NewTHiveLocationParams() *THiveLocationParams {
+ return &THiveLocationParams{}
+}
-func (p *TDataSink) GetResultSink() (v *TResultSink) {
- if !p.IsSetResultSink() {
- return TDataSink_ResultSink_DEFAULT
- }
- return p.ResultSink
+func (p *THiveLocationParams) InitDefault() {
}
-var TDataSink_MysqlTableSink_DEFAULT *TMysqlTableSink
+var THiveLocationParams_WritePath_DEFAULT string
-func (p *TDataSink) GetMysqlTableSink() (v *TMysqlTableSink) {
- if !p.IsSetMysqlTableSink() {
- return TDataSink_MysqlTableSink_DEFAULT
+func (p *THiveLocationParams) GetWritePath() (v string) {
+ if !p.IsSetWritePath() {
+ return THiveLocationParams_WritePath_DEFAULT
}
- return p.MysqlTableSink
+ return *p.WritePath
}
-var TDataSink_ExportSink_DEFAULT *TExportSink
+var THiveLocationParams_TargetPath_DEFAULT string
-func (p *TDataSink) GetExportSink() (v *TExportSink) {
- if !p.IsSetExportSink() {
- return TDataSink_ExportSink_DEFAULT
+func (p *THiveLocationParams) GetTargetPath() (v string) {
+ if !p.IsSetTargetPath() {
+ return THiveLocationParams_TargetPath_DEFAULT
}
- return p.ExportSink
+ return *p.TargetPath
}
-var TDataSink_OlapTableSink_DEFAULT *TOlapTableSink
+var THiveLocationParams_FileType_DEFAULT types.TFileType
-func (p *TDataSink) GetOlapTableSink() (v *TOlapTableSink) {
- if !p.IsSetOlapTableSink() {
- return TDataSink_OlapTableSink_DEFAULT
+func (p *THiveLocationParams) GetFileType() (v types.TFileType) {
+ if !p.IsSetFileType() {
+ return THiveLocationParams_FileType_DEFAULT
}
- return p.OlapTableSink
+ return *p.FileType
}
-var TDataSink_MemoryScratchSink_DEFAULT *TMemoryScratchSink
+var THiveLocationParams_OriginalWritePath_DEFAULT string
-func (p *TDataSink) GetMemoryScratchSink() (v *TMemoryScratchSink) {
- if !p.IsSetMemoryScratchSink() {
- return TDataSink_MemoryScratchSink_DEFAULT
+func (p *THiveLocationParams) GetOriginalWritePath() (v string) {
+ if !p.IsSetOriginalWritePath() {
+ return THiveLocationParams_OriginalWritePath_DEFAULT
}
- return p.MemoryScratchSink
+ return *p.OriginalWritePath
+}
+func (p *THiveLocationParams) SetWritePath(val *string) {
+ p.WritePath = val
+}
+func (p *THiveLocationParams) SetTargetPath(val *string) {
+ p.TargetPath = val
+}
+func (p *THiveLocationParams) SetFileType(val *types.TFileType) {
+ p.FileType = val
+}
+func (p *THiveLocationParams) SetOriginalWritePath(val *string) {
+ p.OriginalWritePath = val
}
-var TDataSink_OdbcTableSink_DEFAULT *TOdbcTableSink
-
-func (p *TDataSink) GetOdbcTableSink() (v *TOdbcTableSink) {
- if !p.IsSetOdbcTableSink() {
- return TDataSink_OdbcTableSink_DEFAULT
- }
- return p.OdbcTableSink
+var fieldIDToName_THiveLocationParams = map[int16]string{
+ 1: "write_path",
+ 2: "target_path",
+ 3: "file_type",
+ 4: "original_write_path",
}
-var TDataSink_ResultFileSink_DEFAULT *TResultFileSink
+func (p *THiveLocationParams) IsSetWritePath() bool {
+ return p.WritePath != nil
+}
-func (p *TDataSink) GetResultFileSink() (v *TResultFileSink) {
- if !p.IsSetResultFileSink() {
- return TDataSink_ResultFileSink_DEFAULT
- }
- return p.ResultFileSink
+func (p *THiveLocationParams) IsSetTargetPath() bool {
+ return p.TargetPath != nil
}
-var TDataSink_JdbcTableSink_DEFAULT *TJdbcTableSink
+func (p *THiveLocationParams) IsSetFileType() bool {
+ return p.FileType != nil
+}
-func (p *TDataSink) GetJdbcTableSink() (v *TJdbcTableSink) {
- if !p.IsSetJdbcTableSink() {
- return TDataSink_JdbcTableSink_DEFAULT
- }
- return p.JdbcTableSink
+func (p *THiveLocationParams) IsSetOriginalWritePath() bool {
+ return p.OriginalWritePath != nil
}
-var TDataSink_MultiCastStreamSink_DEFAULT *TMultiCastDataStreamSink
+func (p *THiveLocationParams) Read(iprot thrift.TProtocol) (err error) {
-func (p *TDataSink) GetMultiCastStreamSink() (v *TMultiCastDataStreamSink) {
- if !p.IsSetMultiCastStreamSink() {
- return TDataSink_MultiCastStreamSink_DEFAULT
- }
- return p.MultiCastStreamSink
-}
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveLocationParams[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *THiveLocationParams) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.WritePath = _field
+ return nil
+}
+func (p *THiveLocationParams) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.TargetPath = _field
+ return nil
+}
+func (p *THiveLocationParams) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *types.TFileType
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ tmp := types.TFileType(v)
+ _field = &tmp
+ }
+ p.FileType = _field
+ return nil
+}
+func (p *THiveLocationParams) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.OriginalWritePath = _field
+ return nil
+}
+
+func (p *THiveLocationParams) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("THiveLocationParams"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *THiveLocationParams) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetWritePath() {
+ if err = oprot.WriteFieldBegin("write_path", thrift.STRING, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.WritePath); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *THiveLocationParams) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetTargetPath() {
+ if err = oprot.WriteFieldBegin("target_path", thrift.STRING, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.TargetPath); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *THiveLocationParams) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetFileType() {
+ if err = oprot.WriteFieldBegin("file_type", thrift.I32, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(*p.FileType)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *THiveLocationParams) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetOriginalWritePath() {
+ if err = oprot.WriteFieldBegin("original_write_path", thrift.STRING, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.OriginalWritePath); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *THiveLocationParams) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("THiveLocationParams(%+v)", *p)
+
+}
+
+func (p *THiveLocationParams) DeepEqual(ano *THiveLocationParams) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.WritePath) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.TargetPath) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.FileType) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.OriginalWritePath) {
+ return false
+ }
+ return true
+}
+
+func (p *THiveLocationParams) Field1DeepEqual(src *string) bool {
+
+ if p.WritePath == src {
+ return true
+ } else if p.WritePath == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.WritePath, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *THiveLocationParams) Field2DeepEqual(src *string) bool {
+
+ if p.TargetPath == src {
+ return true
+ } else if p.TargetPath == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.TargetPath, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *THiveLocationParams) Field3DeepEqual(src *types.TFileType) bool {
+
+ if p.FileType == src {
+ return true
+ } else if p.FileType == nil || src == nil {
+ return false
+ }
+ if *p.FileType != *src {
+ return false
+ }
+ return true
+}
+func (p *THiveLocationParams) Field4DeepEqual(src *string) bool {
+
+ if p.OriginalWritePath == src {
+ return true
+ } else if p.OriginalWritePath == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.OriginalWritePath, *src) != 0 {
+ return false
+ }
+ return true
+}
+
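Editor's sketch (not part of the generated diff): minimal use of the new THiveLocationParams setters and DeepEqual defined above; the helper name and path strings are illustrative placeholders.

func exampleHiveLocationParamsEqual() bool {
	writePath := "/tmp/.doris_staging/example" // placeholder path
	targetPath := "/warehouse/db/tbl"          // placeholder path

	a := NewTHiveLocationParams()
	a.SetWritePath(&writePath)
	a.SetTargetPath(&targetPath)

	b := NewTHiveLocationParams()
	b.SetWritePath(&writePath)
	b.SetTargetPath(&targetPath)

	// Optional fields left unset (file_type, original_write_path) compare
	// equal when both sides are nil, so this returns true.
	return a.DeepEqual(b)
}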
+type TSortedColumn struct {
+ SortColumnName *string `thrift:"sort_column_name,1,optional" frugal:"1,optional,string" json:"sort_column_name,omitempty"`
+ Order *int32 `thrift:"order,2,optional" frugal:"2,optional,i32" json:"order,omitempty"`
+}
+
+func NewTSortedColumn() *TSortedColumn {
+ return &TSortedColumn{}
+}
+
+func (p *TSortedColumn) InitDefault() {
+}
+
+var TSortedColumn_SortColumnName_DEFAULT string
+
+func (p *TSortedColumn) GetSortColumnName() (v string) {
+ if !p.IsSetSortColumnName() {
+ return TSortedColumn_SortColumnName_DEFAULT
+ }
+ return *p.SortColumnName
+}
+
+var TSortedColumn_Order_DEFAULT int32
+
+func (p *TSortedColumn) GetOrder() (v int32) {
+ if !p.IsSetOrder() {
+ return TSortedColumn_Order_DEFAULT
+ }
+ return *p.Order
+}
+func (p *TSortedColumn) SetSortColumnName(val *string) {
+ p.SortColumnName = val
+}
+func (p *TSortedColumn) SetOrder(val *int32) {
+ p.Order = val
+}
+
+var fieldIDToName_TSortedColumn = map[int16]string{
+ 1: "sort_column_name",
+ 2: "order",
+}
+
+func (p *TSortedColumn) IsSetSortColumnName() bool {
+ return p.SortColumnName != nil
+}
+
+func (p *TSortedColumn) IsSetOrder() bool {
+ return p.Order != nil
+}
+
+func (p *TSortedColumn) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TSortedColumn[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TSortedColumn) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.SortColumnName = _field
+ return nil
+}
+func (p *TSortedColumn) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Order = _field
+ return nil
+}
+
+func (p *TSortedColumn) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TSortedColumn"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TSortedColumn) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSortColumnName() {
+ if err = oprot.WriteFieldBegin("sort_column_name", thrift.STRING, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.SortColumnName); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TSortedColumn) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetOrder() {
+ if err = oprot.WriteFieldBegin("order", thrift.I32, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.Order); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *TSortedColumn) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TSortedColumn(%+v)", *p)
+
+}
+
+func (p *TSortedColumn) DeepEqual(ano *TSortedColumn) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.SortColumnName) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.Order) {
+ return false
+ }
+ return true
+}
+
+func (p *TSortedColumn) Field1DeepEqual(src *string) bool {
+
+ if p.SortColumnName == src {
+ return true
+ } else if p.SortColumnName == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.SortColumnName, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *TSortedColumn) Field2DeepEqual(src *int32) bool {
+
+ if p.Order == src {
+ return true
+ } else if p.Order == nil || src == nil {
+ return false
+ }
+ if *p.Order != *src {
+ return false
+ }
+ return true
+}
+
+type TBucketingMode struct {
+ BucketVersion *int32 `thrift:"bucket_version,1,optional" frugal:"1,optional,i32" json:"bucket_version,omitempty"`
+}
+
+func NewTBucketingMode() *TBucketingMode {
+ return &TBucketingMode{}
+}
+
+func (p *TBucketingMode) InitDefault() {
+}
+
+var TBucketingMode_BucketVersion_DEFAULT int32
+
+func (p *TBucketingMode) GetBucketVersion() (v int32) {
+ if !p.IsSetBucketVersion() {
+ return TBucketingMode_BucketVersion_DEFAULT
+ }
+ return *p.BucketVersion
+}
+func (p *TBucketingMode) SetBucketVersion(val *int32) {
+ p.BucketVersion = val
+}
+
+var fieldIDToName_TBucketingMode = map[int16]string{
+ 1: "bucket_version",
+}
+
+func (p *TBucketingMode) IsSetBucketVersion() bool {
+ return p.BucketVersion != nil
+}
+
+func (p *TBucketingMode) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_TBucketingMode[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *TBucketingMode) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.BucketVersion = _field
+ return nil
+}
+
+func (p *TBucketingMode) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("TBucketingMode"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *TBucketingMode) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetBucketVersion() {
+ if err = oprot.WriteFieldBegin("bucket_version", thrift.I32, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.BucketVersion); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *TBucketingMode) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("TBucketingMode(%+v)", *p)
+
+}
+
+func (p *TBucketingMode) DeepEqual(ano *TBucketingMode) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.BucketVersion) {
+ return false
+ }
+ return true
+}
+
+func (p *TBucketingMode) Field1DeepEqual(src *int32) bool {
+
+ if p.BucketVersion == src {
+ return true
+ } else if p.BucketVersion == nil || src == nil {
+ return false
+ }
+ if *p.BucketVersion != *src {
+ return false
+ }
+ return true
+}
+
+type THiveBucket struct {
+ BucketedBy []string `thrift:"bucketed_by,1,optional" frugal:"1,optional,list" json:"bucketed_by,omitempty"`
+ BucketMode *TBucketingMode `thrift:"bucket_mode,2,optional" frugal:"2,optional,TBucketingMode" json:"bucket_mode,omitempty"`
+ BucketCount *int32 `thrift:"bucket_count,3,optional" frugal:"3,optional,i32" json:"bucket_count,omitempty"`
+ SortedBy []*TSortedColumn `thrift:"sorted_by,4,optional" frugal:"4,optional,list" json:"sorted_by,omitempty"`
+}
+
+func NewTHiveBucket() *THiveBucket {
+ return &THiveBucket{}
+}
+
+func (p *THiveBucket) InitDefault() {
+}
+
+var THiveBucket_BucketedBy_DEFAULT []string
+
+func (p *THiveBucket) GetBucketedBy() (v []string) {
+ if !p.IsSetBucketedBy() {
+ return THiveBucket_BucketedBy_DEFAULT
+ }
+ return p.BucketedBy
+}
+
+var THiveBucket_BucketMode_DEFAULT *TBucketingMode
+
+func (p *THiveBucket) GetBucketMode() (v *TBucketingMode) {
+ if !p.IsSetBucketMode() {
+ return THiveBucket_BucketMode_DEFAULT
+ }
+ return p.BucketMode
+}
+
+var THiveBucket_BucketCount_DEFAULT int32
+
+func (p *THiveBucket) GetBucketCount() (v int32) {
+ if !p.IsSetBucketCount() {
+ return THiveBucket_BucketCount_DEFAULT
+ }
+ return *p.BucketCount
+}
+
+var THiveBucket_SortedBy_DEFAULT []*TSortedColumn
+
+func (p *THiveBucket) GetSortedBy() (v []*TSortedColumn) {
+ if !p.IsSetSortedBy() {
+ return THiveBucket_SortedBy_DEFAULT
+ }
+ return p.SortedBy
+}
+func (p *THiveBucket) SetBucketedBy(val []string) {
+ p.BucketedBy = val
+}
+func (p *THiveBucket) SetBucketMode(val *TBucketingMode) {
+ p.BucketMode = val
+}
+func (p *THiveBucket) SetBucketCount(val *int32) {
+ p.BucketCount = val
+}
+func (p *THiveBucket) SetSortedBy(val []*TSortedColumn) {
+ p.SortedBy = val
+}
+
+var fieldIDToName_THiveBucket = map[int16]string{
+ 1: "bucketed_by",
+ 2: "bucket_mode",
+ 3: "bucket_count",
+ 4: "sorted_by",
+}
+
+func (p *THiveBucket) IsSetBucketedBy() bool {
+ return p.BucketedBy != nil
+}
+
+func (p *THiveBucket) IsSetBucketMode() bool {
+ return p.BucketMode != nil
+}
+
+func (p *THiveBucket) IsSetBucketCount() bool {
+ return p.BucketCount != nil
+}
+
+func (p *THiveBucket) IsSetSortedBy() bool {
+ return p.SortedBy != nil
+}
+
+func (p *THiveBucket) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveBucket[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *THiveBucket) ReadField1(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]string, 0, size)
+ for i := 0; i < size; i++ {
+
+ var _elem string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.BucketedBy = _field
+ return nil
+}
+func (p *THiveBucket) ReadField2(iprot thrift.TProtocol) error {
+ _field := NewTBucketingMode()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.BucketMode = _field
+ return nil
+}
+func (p *THiveBucket) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *int32
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.BucketCount = _field
+ return nil
+}
+func (p *THiveBucket) ReadField4(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]*TSortedColumn, 0, size)
+ values := make([]TSortedColumn, size)
+ for i := 0; i < size; i++ {
+ _elem := &values[i]
+ _elem.InitDefault()
+
+ if err := _elem.Read(iprot); err != nil {
+ return err
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.SortedBy = _field
+ return nil
+}
+
+func (p *THiveBucket) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("THiveBucket"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *THiveBucket) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetBucketedBy() {
+ if err = oprot.WriteFieldBegin("bucketed_by", thrift.LIST, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.STRING, len(p.BucketedBy)); err != nil {
+ return err
+ }
+ for _, v := range p.BucketedBy {
+ if err := oprot.WriteString(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *THiveBucket) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetBucketMode() {
+ if err = oprot.WriteFieldBegin("bucket_mode", thrift.STRUCT, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.BucketMode.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *THiveBucket) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetBucketCount() {
+ if err = oprot.WriteFieldBegin("bucket_count", thrift.I32, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(*p.BucketCount); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *THiveBucket) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetSortedBy() {
+ if err = oprot.WriteFieldBegin("sorted_by", thrift.LIST, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.STRUCT, len(p.SortedBy)); err != nil {
+ return err
+ }
+ for _, v := range p.SortedBy {
+ if err := v.Write(oprot); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *THiveBucket) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("THiveBucket(%+v)", *p)
+
+}
+
+func (p *THiveBucket) DeepEqual(ano *THiveBucket) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.BucketedBy) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.BucketMode) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.BucketCount) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.SortedBy) {
+ return false
+ }
+ return true
+}
+
+func (p *THiveBucket) Field1DeepEqual(src []string) bool {
+
+ if len(p.BucketedBy) != len(src) {
+ return false
+ }
+ for i, v := range p.BucketedBy {
+ _src := src[i]
+ if strings.Compare(v, _src) != 0 {
+ return false
+ }
+ }
+ return true
+}
+func (p *THiveBucket) Field2DeepEqual(src *TBucketingMode) bool {
+
+ if !p.BucketMode.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+func (p *THiveBucket) Field3DeepEqual(src *int32) bool {
+
+ if p.BucketCount == src {
+ return true
+ } else if p.BucketCount == nil || src == nil {
+ return false
+ }
+ if *p.BucketCount != *src {
+ return false
+ }
+ return true
+}
+func (p *THiveBucket) Field4DeepEqual(src []*TSortedColumn) bool {
+
+ if len(p.SortedBy) != len(src) {
+ return false
+ }
+ for i, v := range p.SortedBy {
+ _src := src[i]
+ if !v.DeepEqual(_src) {
+ return false
+ }
+ }
+ return true
+}
+
+type THiveColumn struct {
+ Name *string `thrift:"name,1,optional" frugal:"1,optional,string" json:"name,omitempty"`
+ ColumnType *THiveColumnType `thrift:"column_type,2,optional" frugal:"2,optional,THiveColumnType" json:"column_type,omitempty"`
+}
+
+func NewTHiveColumn() *THiveColumn {
+ return &THiveColumn{}
+}
+
+func (p *THiveColumn) InitDefault() {
+}
+
+var THiveColumn_Name_DEFAULT string
+
+func (p *THiveColumn) GetName() (v string) {
+ if !p.IsSetName() {
+ return THiveColumn_Name_DEFAULT
+ }
+ return *p.Name
+}
+
+var THiveColumn_ColumnType_DEFAULT THiveColumnType
+
+func (p *THiveColumn) GetColumnType() (v THiveColumnType) {
+ if !p.IsSetColumnType() {
+ return THiveColumn_ColumnType_DEFAULT
+ }
+ return *p.ColumnType
+}
+func (p *THiveColumn) SetName(val *string) {
+ p.Name = val
+}
+func (p *THiveColumn) SetColumnType(val *THiveColumnType) {
+ p.ColumnType = val
+}
+
+var fieldIDToName_THiveColumn = map[int16]string{
+ 1: "name",
+ 2: "column_type",
+}
+
+func (p *THiveColumn) IsSetName() bool {
+ return p.Name != nil
+}
+
+func (p *THiveColumn) IsSetColumnType() bool {
+ return p.ColumnType != nil
+}
+
+func (p *THiveColumn) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveColumn[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *THiveColumn) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.Name = _field
+ return nil
+}
+func (p *THiveColumn) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *THiveColumnType
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ tmp := THiveColumnType(v)
+ _field = &tmp
+ }
+ p.ColumnType = _field
+ return nil
+}
+
+func (p *THiveColumn) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("THiveColumn"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *THiveColumn) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetName() {
+ if err = oprot.WriteFieldBegin("name", thrift.STRING, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.Name); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *THiveColumn) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetColumnType() {
+ if err = oprot.WriteFieldBegin("column_type", thrift.I32, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(*p.ColumnType)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *THiveColumn) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("THiveColumn(%+v)", *p)
+
+}
+
+func (p *THiveColumn) DeepEqual(ano *THiveColumn) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Name) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.ColumnType) {
+ return false
+ }
+ return true
+}
+
+func (p *THiveColumn) Field1DeepEqual(src *string) bool {
+
+ if p.Name == src {
+ return true
+ } else if p.Name == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.Name, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *THiveColumn) Field2DeepEqual(src *THiveColumnType) bool {
+
+ if p.ColumnType == src {
+ return true
+ } else if p.ColumnType == nil || src == nil {
+ return false
+ }
+ if *p.ColumnType != *src {
+ return false
+ }
+ return true
+}
+
+type THivePartition struct {
+ Values []string `thrift:"values,1,optional" frugal:"1,optional,list" json:"values,omitempty"`
+ Location *THiveLocationParams `thrift:"location,2,optional" frugal:"2,optional,THiveLocationParams" json:"location,omitempty"`
+ FileFormat *plannodes.TFileFormatType `thrift:"file_format,3,optional" frugal:"3,optional,TFileFormatType" json:"file_format,omitempty"`
+}
+
+func NewTHivePartition() *THivePartition {
+ return &THivePartition{}
+}
+
+func (p *THivePartition) InitDefault() {
+}
+
+var THivePartition_Values_DEFAULT []string
+
+func (p *THivePartition) GetValues() (v []string) {
+ if !p.IsSetValues() {
+ return THivePartition_Values_DEFAULT
+ }
+ return p.Values
+}
+
+var THivePartition_Location_DEFAULT *THiveLocationParams
+
+func (p *THivePartition) GetLocation() (v *THiveLocationParams) {
+ if !p.IsSetLocation() {
+ return THivePartition_Location_DEFAULT
+ }
+ return p.Location
+}
+
+var THivePartition_FileFormat_DEFAULT plannodes.TFileFormatType
+
+func (p *THivePartition) GetFileFormat() (v plannodes.TFileFormatType) {
+ if !p.IsSetFileFormat() {
+ return THivePartition_FileFormat_DEFAULT
+ }
+ return *p.FileFormat
+}
+func (p *THivePartition) SetValues(val []string) {
+ p.Values = val
+}
+func (p *THivePartition) SetLocation(val *THiveLocationParams) {
+ p.Location = val
+}
+func (p *THivePartition) SetFileFormat(val *plannodes.TFileFormatType) {
+ p.FileFormat = val
+}
+
+var fieldIDToName_THivePartition = map[int16]string{
+ 1: "values",
+ 2: "location",
+ 3: "file_format",
+}
+
+func (p *THivePartition) IsSetValues() bool {
+ return p.Values != nil
+}
+
+func (p *THivePartition) IsSetLocation() bool {
+ return p.Location != nil
+}
+
+func (p *THivePartition) IsSetFileFormat() bool {
+ return p.FileFormat != nil
+}
+
+func (p *THivePartition) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.STRUCT {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.I32 {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THivePartition[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *THivePartition) ReadField1(iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin()
+ if err != nil {
+ return err
+ }
+ _field := make([]string, 0, size)
+ for i := 0; i < size; i++ {
+
+ var _elem string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _elem = v
+ }
+
+ _field = append(_field, _elem)
+ }
+ if err := iprot.ReadListEnd(); err != nil {
+ return err
+ }
+ p.Values = _field
+ return nil
+}
+func (p *THivePartition) ReadField2(iprot thrift.TProtocol) error {
+ _field := NewTHiveLocationParams()
+ if err := _field.Read(iprot); err != nil {
+ return err
+ }
+ p.Location = _field
+ return nil
+}
+func (p *THivePartition) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *plannodes.TFileFormatType
+ if v, err := iprot.ReadI32(); err != nil {
+ return err
+ } else {
+ tmp := plannodes.TFileFormatType(v)
+ _field = &tmp
+ }
+ p.FileFormat = _field
+ return nil
+}
+
+func (p *THivePartition) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("THivePartition"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *THivePartition) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetValues() {
+ if err = oprot.WriteFieldBegin("values", thrift.LIST, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteListBegin(thrift.STRING, len(p.Values)); err != nil {
+ return err
+ }
+ for _, v := range p.Values {
+ if err := oprot.WriteString(v); err != nil {
+ return err
+ }
+ }
+ if err := oprot.WriteListEnd(); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *THivePartition) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetLocation() {
+ if err = oprot.WriteFieldBegin("location", thrift.STRUCT, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := p.Location.Write(oprot); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *THivePartition) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetFileFormat() {
+ if err = oprot.WriteFieldBegin("file_format", thrift.I32, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteI32(int32(*p.FileFormat)); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *THivePartition) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("THivePartition(%+v)", *p)
+
+}
+
+func (p *THivePartition) DeepEqual(ano *THivePartition) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.Values) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.Location) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.FileFormat) {
+ return false
+ }
+ return true
+}
+
+func (p *THivePartition) Field1DeepEqual(src []string) bool {
+
+ if len(p.Values) != len(src) {
+ return false
+ }
+ for i, v := range p.Values {
+ _src := src[i]
+ if strings.Compare(v, _src) != 0 {
+ return false
+ }
+ }
+ return true
+}
+func (p *THivePartition) Field2DeepEqual(src *THiveLocationParams) bool {
+
+ if !p.Location.DeepEqual(src) {
+ return false
+ }
+ return true
+}
+func (p *THivePartition) Field3DeepEqual(src *plannodes.TFileFormatType) bool {
+
+ if p.FileFormat == src {
+ return true
+ } else if p.FileFormat == nil || src == nil {
+ return false
+ }
+ if *p.FileFormat != *src {
+ return false
+ }
+ return true
+}
+
+type THiveSerDeProperties struct {
+ FieldDelim *string `thrift:"field_delim,1,optional" frugal:"1,optional,string" json:"field_delim,omitempty"`
+ LineDelim *string `thrift:"line_delim,2,optional" frugal:"2,optional,string" json:"line_delim,omitempty"`
+ CollectionDelim *string `thrift:"collection_delim,3,optional" frugal:"3,optional,string" json:"collection_delim,omitempty"`
+ MapkvDelim *string `thrift:"mapkv_delim,4,optional" frugal:"4,optional,string" json:"mapkv_delim,omitempty"`
+ EscapeChar *string `thrift:"escape_char,5,optional" frugal:"5,optional,string" json:"escape_char,omitempty"`
+ NullFormat *string `thrift:"null_format,6,optional" frugal:"6,optional,string" json:"null_format,omitempty"`
+}
+
+func NewTHiveSerDeProperties() *THiveSerDeProperties {
+ return &THiveSerDeProperties{}
+}
+
+func (p *THiveSerDeProperties) InitDefault() {
+}
+
+var THiveSerDeProperties_FieldDelim_DEFAULT string
+
+func (p *THiveSerDeProperties) GetFieldDelim() (v string) {
+ if !p.IsSetFieldDelim() {
+ return THiveSerDeProperties_FieldDelim_DEFAULT
+ }
+ return *p.FieldDelim
+}
+
+var THiveSerDeProperties_LineDelim_DEFAULT string
+
+func (p *THiveSerDeProperties) GetLineDelim() (v string) {
+ if !p.IsSetLineDelim() {
+ return THiveSerDeProperties_LineDelim_DEFAULT
+ }
+ return *p.LineDelim
+}
+
+var THiveSerDeProperties_CollectionDelim_DEFAULT string
+
+func (p *THiveSerDeProperties) GetCollectionDelim() (v string) {
+ if !p.IsSetCollectionDelim() {
+ return THiveSerDeProperties_CollectionDelim_DEFAULT
+ }
+ return *p.CollectionDelim
+}
+
+var THiveSerDeProperties_MapkvDelim_DEFAULT string
+
+func (p *THiveSerDeProperties) GetMapkvDelim() (v string) {
+ if !p.IsSetMapkvDelim() {
+ return THiveSerDeProperties_MapkvDelim_DEFAULT
+ }
+ return *p.MapkvDelim
+}
+
+var THiveSerDeProperties_EscapeChar_DEFAULT string
+
+func (p *THiveSerDeProperties) GetEscapeChar() (v string) {
+ if !p.IsSetEscapeChar() {
+ return THiveSerDeProperties_EscapeChar_DEFAULT
+ }
+ return *p.EscapeChar
+}
+
+var THiveSerDeProperties_NullFormat_DEFAULT string
+
+func (p *THiveSerDeProperties) GetNullFormat() (v string) {
+ if !p.IsSetNullFormat() {
+ return THiveSerDeProperties_NullFormat_DEFAULT
+ }
+ return *p.NullFormat
+}
+func (p *THiveSerDeProperties) SetFieldDelim(val *string) {
+ p.FieldDelim = val
+}
+func (p *THiveSerDeProperties) SetLineDelim(val *string) {
+ p.LineDelim = val
+}
+func (p *THiveSerDeProperties) SetCollectionDelim(val *string) {
+ p.CollectionDelim = val
+}
+func (p *THiveSerDeProperties) SetMapkvDelim(val *string) {
+ p.MapkvDelim = val
+}
+func (p *THiveSerDeProperties) SetEscapeChar(val *string) {
+ p.EscapeChar = val
+}
+func (p *THiveSerDeProperties) SetNullFormat(val *string) {
+ p.NullFormat = val
+}
+
+var fieldIDToName_THiveSerDeProperties = map[int16]string{
+ 1: "field_delim",
+ 2: "line_delim",
+ 3: "collection_delim",
+ 4: "mapkv_delim",
+ 5: "escape_char",
+ 6: "null_format",
+}
+
+func (p *THiveSerDeProperties) IsSetFieldDelim() bool {
+ return p.FieldDelim != nil
+}
+
+func (p *THiveSerDeProperties) IsSetLineDelim() bool {
+ return p.LineDelim != nil
+}
+
+func (p *THiveSerDeProperties) IsSetCollectionDelim() bool {
+ return p.CollectionDelim != nil
+}
+
+func (p *THiveSerDeProperties) IsSetMapkvDelim() bool {
+ return p.MapkvDelim != nil
+}
+
+func (p *THiveSerDeProperties) IsSetEscapeChar() bool {
+ return p.EscapeChar != nil
+}
+
+func (p *THiveSerDeProperties) IsSetNullFormat() bool {
+ return p.NullFormat != nil
+}
+
+func (p *THiveSerDeProperties) Read(iprot thrift.TProtocol) (err error) {
+
+ var fieldTypeId thrift.TType
+ var fieldId int16
+
+ if _, err = iprot.ReadStructBegin(); err != nil {
+ goto ReadStructBeginError
+ }
+
+ for {
+ _, fieldTypeId, fieldId, err = iprot.ReadFieldBegin()
+ if err != nil {
+ goto ReadFieldBeginError
+ }
+ if fieldTypeId == thrift.STOP {
+ break
+ }
+
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField1(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField2(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 3:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField3(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 4:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField4(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 5:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField5(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ case 6:
+ if fieldTypeId == thrift.STRING {
+ if err = p.ReadField6(iprot); err != nil {
+ goto ReadFieldError
+ }
+ } else if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ default:
+ if err = iprot.Skip(fieldTypeId); err != nil {
+ goto SkipFieldError
+ }
+ }
+ if err = iprot.ReadFieldEnd(); err != nil {
+ goto ReadFieldEndError
+ }
+ }
+ if err = iprot.ReadStructEnd(); err != nil {
+ goto ReadStructEndError
+ }
+
+ return nil
+ReadStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct begin error: ", p), err)
+ReadFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d begin error: ", p, fieldId), err)
+ReadFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T read field %d '%s' error: ", p, fieldId, fieldIDToName_THiveSerDeProperties[fieldId]), err)
+SkipFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T field %d skip type %d error: ", p, fieldId, fieldTypeId), err)
+
+ReadFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read field end error", p), err)
+ReadStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+}
+
+func (p *THiveSerDeProperties) ReadField1(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.FieldDelim = _field
+ return nil
+}
+func (p *THiveSerDeProperties) ReadField2(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.LineDelim = _field
+ return nil
+}
+func (p *THiveSerDeProperties) ReadField3(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.CollectionDelim = _field
+ return nil
+}
+func (p *THiveSerDeProperties) ReadField4(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.MapkvDelim = _field
+ return nil
+}
+func (p *THiveSerDeProperties) ReadField5(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.EscapeChar = _field
+ return nil
+}
+func (p *THiveSerDeProperties) ReadField6(iprot thrift.TProtocol) error {
+
+ var _field *string
+ if v, err := iprot.ReadString(); err != nil {
+ return err
+ } else {
+ _field = &v
+ }
+ p.NullFormat = _field
+ return nil
+}
+
+func (p *THiveSerDeProperties) Write(oprot thrift.TProtocol) (err error) {
+ var fieldId int16
+ if err = oprot.WriteStructBegin("THiveSerDeProperties"); err != nil {
+ goto WriteStructBeginError
+ }
+ if p != nil {
+ if err = p.writeField1(oprot); err != nil {
+ fieldId = 1
+ goto WriteFieldError
+ }
+ if err = p.writeField2(oprot); err != nil {
+ fieldId = 2
+ goto WriteFieldError
+ }
+ if err = p.writeField3(oprot); err != nil {
+ fieldId = 3
+ goto WriteFieldError
+ }
+ if err = p.writeField4(oprot); err != nil {
+ fieldId = 4
+ goto WriteFieldError
+ }
+ if err = p.writeField5(oprot); err != nil {
+ fieldId = 5
+ goto WriteFieldError
+ }
+ if err = p.writeField6(oprot); err != nil {
+ fieldId = 6
+ goto WriteFieldError
+ }
+ }
+ if err = oprot.WriteFieldStop(); err != nil {
+ goto WriteFieldStopError
+ }
+ if err = oprot.WriteStructEnd(); err != nil {
+ goto WriteStructEndError
+ }
+ return nil
+WriteStructBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
+WriteFieldError:
+ return thrift.PrependError(fmt.Sprintf("%T write field %d error: ", p, fieldId), err)
+WriteFieldStopError:
+ return thrift.PrependError(fmt.Sprintf("%T write field stop error: ", p), err)
+WriteStructEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write struct end error: ", p), err)
+}
+
+func (p *THiveSerDeProperties) writeField1(oprot thrift.TProtocol) (err error) {
+ if p.IsSetFieldDelim() {
+ if err = oprot.WriteFieldBegin("field_delim", thrift.STRING, 1); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.FieldDelim); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 1 end error: ", p), err)
+}
+
+func (p *THiveSerDeProperties) writeField2(oprot thrift.TProtocol) (err error) {
+ if p.IsSetLineDelim() {
+ if err = oprot.WriteFieldBegin("line_delim", thrift.STRING, 2); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.LineDelim); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 2 end error: ", p), err)
+}
+
+func (p *THiveSerDeProperties) writeField3(oprot thrift.TProtocol) (err error) {
+ if p.IsSetCollectionDelim() {
+ if err = oprot.WriteFieldBegin("collection_delim", thrift.STRING, 3); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.CollectionDelim); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 3 end error: ", p), err)
+}
+
+func (p *THiveSerDeProperties) writeField4(oprot thrift.TProtocol) (err error) {
+ if p.IsSetMapkvDelim() {
+ if err = oprot.WriteFieldBegin("mapkv_delim", thrift.STRING, 4); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.MapkvDelim); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 4 end error: ", p), err)
+}
+
+func (p *THiveSerDeProperties) writeField5(oprot thrift.TProtocol) (err error) {
+ if p.IsSetEscapeChar() {
+ if err = oprot.WriteFieldBegin("escape_char", thrift.STRING, 5); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.EscapeChar); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 5 end error: ", p), err)
+}
+
+func (p *THiveSerDeProperties) writeField6(oprot thrift.TProtocol) (err error) {
+ if p.IsSetNullFormat() {
+ if err = oprot.WriteFieldBegin("null_format", thrift.STRING, 6); err != nil {
+ goto WriteFieldBeginError
+ }
+ if err := oprot.WriteString(*p.NullFormat); err != nil {
+ return err
+ }
+ if err = oprot.WriteFieldEnd(); err != nil {
+ goto WriteFieldEndError
+ }
+ }
+ return nil
+WriteFieldBeginError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 begin error: ", p), err)
+WriteFieldEndError:
+ return thrift.PrependError(fmt.Sprintf("%T write field 6 end error: ", p), err)
+}
+
+func (p *THiveSerDeProperties) String() string {
+ if p == nil {
+ return ""
+ }
+ return fmt.Sprintf("THiveSerDeProperties(%+v)", *p)
+
+}
+
+func (p *THiveSerDeProperties) DeepEqual(ano *THiveSerDeProperties) bool {
+ if p == ano {
+ return true
+ } else if p == nil || ano == nil {
+ return false
+ }
+ if !p.Field1DeepEqual(ano.FieldDelim) {
+ return false
+ }
+ if !p.Field2DeepEqual(ano.LineDelim) {
+ return false
+ }
+ if !p.Field3DeepEqual(ano.CollectionDelim) {
+ return false
+ }
+ if !p.Field4DeepEqual(ano.MapkvDelim) {
+ return false
+ }
+ if !p.Field5DeepEqual(ano.EscapeChar) {
+ return false
+ }
+ if !p.Field6DeepEqual(ano.NullFormat) {
+ return false
+ }
+ return true
+}
+
+func (p *THiveSerDeProperties) Field1DeepEqual(src *string) bool {
+
+ if p.FieldDelim == src {
+ return true
+ } else if p.FieldDelim == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.FieldDelim, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *THiveSerDeProperties) Field2DeepEqual(src *string) bool {
+
+ if p.LineDelim == src {
+ return true
+ } else if p.LineDelim == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.LineDelim, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *THiveSerDeProperties) Field3DeepEqual(src *string) bool {
+
+ if p.CollectionDelim == src {
+ return true
+ } else if p.CollectionDelim == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.CollectionDelim, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *THiveSerDeProperties) Field4DeepEqual(src *string) bool {
+
+ if p.MapkvDelim == src {
+ return true
+ } else if p.MapkvDelim == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.MapkvDelim, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *THiveSerDeProperties) Field5DeepEqual(src *string) bool {
+
+ if p.EscapeChar == src {
+ return true
+ } else if p.EscapeChar == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.EscapeChar, *src) != 0 {
+ return false
+ }
+ return true
+}
+func (p *THiveSerDeProperties) Field6DeepEqual(src *string) bool {
+
+ if p.NullFormat == src {
+ return true
+ } else if p.NullFormat == nil || src == nil {
+ return false
+ }
+ if strings.Compare(*p.NullFormat, *src) != 0 {
+ return false
+ }
+ return true
+}
+
+type THiveTableSink struct {
+ DbName *string `thrift:"db_name,1,optional" frugal:"1,optional,string" json:"db_name,omitempty"`
+ TableName *string `thrift:"table_name,2,optional" frugal:"2,optional,string" json:"table_name,omitempty"`
+ Columns []*THiveColumn `thrift:"columns,3,optional" frugal:"3,optional,list" json:"columns,omitempty"`
+ Partitions []*THivePartition `thrift:"partitions,4,optional" frugal:"4,optional,list" json:"partitions,omitempty"`
+ BucketInfo *THiveBucket `thrift:"bucket_info,5,optional" frugal:"5,optional,THiveBucket" json:"bucket_info,omitempty"`
+ FileFormat *plannodes.TFileFormatType `thrift:"file_format,6,optional" frugal:"6,optional,TFileFormatType" json:"file_format,omitempty"`
+ CompressionType *plannodes.TFileCompressType `thrift:"compression_type,7,optional" frugal:"7,optional,TFileCompressType" json:"compression_type,omitempty"`
+ Location *THiveLocationParams `thrift:"location,8,optional" frugal:"8,optional,THiveLocationParams" json:"location,omitempty"`
+ HadoopConfig map[string]string `thrift:"hadoop_config,9,optional" frugal:"9,optional,map